def solution(n: int = 100) -> int:
    """Project Euler problem 6: difference between the square of the sum of the
    first ``n`` natural numbers and the sum of their squares.

    >>> solution(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")

"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_a = parser.parse_args()
if args.model_type == "bert":
_a = BertForMaskedLM.from_pretrained(args.model_name)
_a = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_a = model.state_dict()
_a = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
_a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
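
# Example invocation (sketch; the script's filename is assumed here, the flags come
# from the argparse definitions above):
#   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform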
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Returns a shuffled dataloader over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """Runs forward passes over the calibration data to collect quantization statistics."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
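
# Example (sketch, not part of the original file; assumes an active SparkSession `spark`):
#   df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
#   ds = SparkDatasetReader(df, streaming=False).read()  # materializes an Arrow-backed Dataset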
from __future__ import annotations

from random import random


class Node:
    """
    Treap's node: stores a value and the node's random heap priority,
    plus links to the left and right subtrees.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two subtrees: values <= value, and values > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root and its right subtree belong to the right result
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root and its left subtree belong to the left result
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must be <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert a new node with the given value into the treap."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes holding the given value."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply user commands: '+x' inserts x, '-x' erases all nodes with value x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw audio into log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a Python dictionary, dropping the (large) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
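
# Example (sketch, not part of the original file): extracting log-mel features from
# one second of silence at the expected 16 kHz sampling rate.
#   fe = WhisperFeatureExtractor()
#   feats = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#   print(feats["input_features"].shape)  # (1, 80, 3000): 80 mel bins over 30 s of padded audio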
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
if hor == 1_28:
lowercase_ : Union[str, Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowercase_ : List[Any] = (32, 1_28, 2_56)
lowercase_ : Tuple = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowercase_ : Dict = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowercase_ : str = (32, 64, 1_28, 2_56)
lowercase_ : Tuple = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowercase_ : Optional[Any] = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowercase_ : str = model.state_dict()
lowercase_ : Any = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
lowercase_ : Any = UNetaDModel(**__SCREAMING_SNAKE_CASE )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowercase_ : int = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowercase_ : Union[str, Any] = state_dict.pop(__SCREAMING_SNAKE_CASE )
hf_value_function.load_state_dict(__SCREAMING_SNAKE_CASE )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( ):
lowercase_ : Tuple = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowercase_ : List[str] = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowercase_ : str = model
lowercase_ : List[str] = UNetaDModel(**__SCREAMING_SNAKE_CASE )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowercase_ : List[Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowercase_ : List[str] = state_dict.pop(__SCREAMING_SNAKE_CASE )
hf_value_function.load_state_dict(__SCREAMING_SNAKE_CASE )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ : List[Any] = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # NOTE: `lowercase_` is the giant expected-encoding fixture assigned on the line
        # above, kept verbatim (including its generated variable name) because of its size.
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
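
# Example (sketch, not part of the original file): a small Informer configuration.
#   config = InformerConfig(prediction_length=24, context_length=48)
#   assert config.hidden_size == config.d_model == 64  # "hidden_size" resolves via attribute_map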
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special (e.g. [eos] or [pad]) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
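
# Example (sketch, not part of the original file; "spiece.model" is a hypothetical
# path to a SentencePiece model):
#   tok = PegasusTokenizer("spiece.model")
#   ids = tok("The quick brown fox.").input_ids  # ends with the EOS id (1)
#   print(tok.decode(ids))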
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
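
# Example (sketch, not part of the original file): the defaults above describe the
# architecture of checkpoints such as MIT/ast-finetuned-audioset-10-10-0.4593.
#   config = ASTConfig()
#   print(config.hidden_size, config.num_mel_bins)  # 768 128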
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=7 , UpperCamelCase=3 , UpperCamelCase=30 , UpperCamelCase=400 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=1 / 255 , UpperCamelCase=True , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=True , ):
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_pad
def snake_case ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def snake_case ( self , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
lowerCamelCase_ ,lowerCamelCase_ = image.size
else:
lowerCamelCase_ ,lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size["shortest_edge"] * h / w )
lowerCamelCase_ = self.size["shortest_edge"]
elif w > h:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = self.size["shortest_edge"]
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ ,lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
lowerCamelCase_ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = DetrImageProcessor if is_vision_available() else None
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = DetrImageProcessingTester(self )
@property
def snake_case ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(UpperCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase , "size" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_pad" ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
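# A minimal sketch of a concrete subcommand (a hypothetical `EchoCommand`, not
# part of the original file) showing how the two abstract hooks are meant to be
# used: `register_subcommand` attaches a sub-parser and a factory, and `run`
# executes the instance built from the parsed arguments. Note that `parser`
# here is assumed to be the sub-parsers action returned by `add_subparsers`,
# matching how the library wires its CLI entry point.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text", type=str, help="Text to print back.")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)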
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
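    # Descriptive note (not in the original file): with the default
    # source-language setup below, `prefix_tokens` is empty and
    # `suffix_tokens` is `[eos, src_lang_code]`, so a single mBART sequence is
    # laid out as `X </s> <lang_code>` rather than BERT-style `[CLS] X [SEP]`.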
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
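# A minimal usage sketch (not part of the original file; assumes the
# "facebook/mbart-large-en-ro" checkpoint is reachable):
#
#   tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   tokenizer.src_lang = "en_XX"
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids end with [..., eos_id, id("en_XX")] per the layout set above;
#   # for generation, pass forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"].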
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
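# Descriptive note (not in the original file): the layer above implements
# squeeze-and-excitation. The global average pool "squeezes" each channel to a
# single statistic, the two 1x1 convolutions form a bottleneck that learns a
# per-channel gate in (0, 1) via the sigmoid, and the final multiplication
# rescales the input feature map channel-wise.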
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
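# A minimal inference sketch (not part of the original file; assumes the
# checkpoint above is reachable and `image` is a PIL image):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits
#   predicted_label = int(tf.math.argmax(logits, axis=-1)[0])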
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
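# A minimal usage sketch (not part of the original file): the defaults describe
# a ViT-Base-sized MSN backbone; other variants are just overridden kwargs.
#
#   config = ViTMSNConfig()  # vit-msn-base style defaults
#   tiny = ViTMSNConfig(hidden_size=192, num_hidden_layers=4, num_attention_heads=3)  # hypothetical sizes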
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, loading this checkpoint's custom processor fails.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
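    # Descriptive note (not in the original file): `trust_remote_code=True`
    # tells AutoProcessor to download and import the processor implementation
    # referenced by the checkpoint's `auto_map`, rather than a class shipped
    # with the library. The test above covers all three behaviours: unset
    # (raises), explicitly disabled (raises), and enabled (the remote
    # `NewProcessor` class loads along with its feature extractor and tokenizer).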
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """
        Apply the Sherman-Morrison formula: with `self` holding ainv = A^(-1),
        compute (A + u * v^T)^(-1) = ainv - (ainv * u * v^T * ainv) / (1 + v^T * ainv * u).
        Returns None when A + u * v^T is not invertible (denominator is zero).
        """
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
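# Optional cross-check of `sherman_morrison` against NumPy (a sketch, not part
# of the original file; requires numpy, which this module does not import):
#
#   import numpy as np
#   A = np.eye(3)
#   u = np.array([[1.0], [2.0], [-3.0]])
#   v = np.array([[4.0], [-2.0], [5.0]])
#   expected = np.linalg.inv(A + u @ v.T)  # should match Matrix.sherman_morrison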
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1) is tested with a = identity, so ainv starts as the identity matrix
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")

        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")

        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = BertTokenizer
UpperCAmelCase__ = BertTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = filter_non_english
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = '''unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [9, 6, 7, 12, 10, 11] )
def _lowercase (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# With lower casing
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer()
SCREAMING_SNAKE_CASE_ = '''a\n\'ll !!to?\'d of, can\'t.'''
SCREAMING_SNAKE_CASE_ = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
SCREAMING_SNAKE_CASE_ = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _lowercase (self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _lowercase (self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _lowercase (self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def _lowercase (self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , '''do_lower_case''' ) else False
SCREAMING_SNAKE_CASE_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''的''', '''人''', '''有''']
SCREAMING_SNAKE_CASE_ = ''''''.join(SCREAMING_SNAKE_CASE_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 720 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''', __a, )
if isinstance(__a, torch.Tensor ):
return image
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(__a ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return image
def _lowerCamelCase ( __a ):
if isinstance(__a, torch.Tensor ):
return mask
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = mask[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(mask[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ ) | 628 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]:
# word like '180' or '身高' or '神'
for char in word:
a_ : Union[str, Any] = ord(_SCREAMING_SNAKE_CASE )
if not _is_chinese_char(_SCREAMING_SNAKE_CASE ):
return 0
return 1
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] ) -> Dict:
a_ : int = set()
for token in tokens:
a_ : Any = len(_SCREAMING_SNAKE_CASE ) > 1 and is_chinese(_SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(_SCREAMING_SNAKE_CASE )
a_ : int = list(_SCREAMING_SNAKE_CASE )
return word_list
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :set() ) -> Dict:
if not chinese_word_set:
return bert_tokens
a_ : Dict = max([len(_SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
a_ : int = bert_tokens
a_ , a_ : int = 0, len(_SCREAMING_SNAKE_CASE )
while start < end:
a_ : List[Any] = True
if is_chinese(bert_word[start] ):
a_ : Dict = min(end - start , _SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , 1 , -1 ):
a_ : Optional[int] = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ : Any = "##" + bert_word[j]
a_ : int = start + i
a_ : Union[str, Any] = False
break
if single_word:
start += 1
return bert_word
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :LTP , _SCREAMING_SNAKE_CASE :BertTokenizer ) -> str:
a_ : Union[str, Any] = []
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 100 ):
a_ : Union[str, Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
a_ : Union[str, Any] = [get_chinese_word(_SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
a_ : Tuple = []
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 100 ):
a_ : Union[str, Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
a_ : int = []
for input_ids, chinese_word in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ : Any = []
for id in input_ids:
a_ : Any = bert_tokenizer._convert_id_to_token(_SCREAMING_SNAKE_CASE )
input_tokens.append(_SCREAMING_SNAKE_CASE )
a_ : int = add_sub_symbol(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
a_ : List[Any] = token[2:]
# save chinese tokens' pos
if len(_SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(_SCREAMING_SNAKE_CASE ) ):
ref_id.append(_SCREAMING_SNAKE_CASE )
ref_ids.append(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
return ref_ids
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Any ) -> str:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , "r" , encoding="utf-8" ) as f:
a_ : Optional[Any] = f.readlines()
a_ : Optional[int] = [line.strip() for line in data if len(_SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a_ : Tuple = LTP(args.ltp ) # faster in GPU device
a_ : int = BertTokenizer.from_pretrained(args.bert )
a_ : List[str] = prepare_ref(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
a_ : List[str] = [json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" for ref in ref_ids]
f.writelines(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase = parser.parse_args()
main(args)
| 473 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'sentencepiece.model'}
UpperCamelCase = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
UpperCamelCase = {
'google/rembert': 2_56,
}
class UpperCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
a_ : List[str] = do_lower_case
a_ : List[Any] = remove_space
a_ : int = keep_accents
a_ : str = vocab_file
a_ : Union[str, Any] = spm.SentencePieceProcessor()
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def A ( self ) -> str:
return len(self.sp_model )
def A ( self ) -> str:
a_ : Optional[int] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
a_ : Optional[Any] = self.__dict__.copy()
a_ : int = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> Any:
a_ : int = d
a_ : Optional[int] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]:
a_ : Optional[int] = self.sp_model.EncodeAsPieces(_SCREAMING_SNAKE_CASE )
return pieces
def A ( self , _SCREAMING_SNAKE_CASE ) -> int:
return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def A ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
def A ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
a_ : Tuple = self.sp_model.decode_pieces(_SCREAMING_SNAKE_CASE )
return out_string
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
a_ : Union[str, Any] = [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
a_ : Optional[int] = [self.sep_token_id]
a_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("Vocabulary path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
a_ : Tuple = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 473 | 1 |
'''simple docstring'''
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if n_term == "":
return []
_snake_case : list = []
for temp in range(int(lowerCAmelCase_ ) ):
series.append(f'''1/{temp + 1}''' if series else '''1''' )
return series
if __name__ == "__main__":
UpperCAmelCase : List[str] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 47 |
'''simple docstring'''
from collections.abc import Generator
def _a ( ):
"""simple docstring"""
_snake_case , _snake_case : Union[str, Any] = 0, 1
while True:
_snake_case , _snake_case : List[str] = b, a + b
yield b
def _a ( lowerCAmelCase_ = 1_000 ):
"""simple docstring"""
_snake_case : List[str] = 1
_snake_case : Dict = fibonacci_generator()
while len(str(next(lowerCAmelCase_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = abs(snake_case__ )
_snake_case : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = abs(snake_case__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
return sum(int(snake_case__ ) for c in str(abs(snake_case__ ) ) )
def UpperCAmelCase__ ():
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(snake_case__ : Callable , snake_case__ : int ) -> None:
_snake_case : Tuple = F"{func.__name__}({value})"
_snake_case : Dict = timeit(F"__main__.{call}" , setup="""import __main__""" )
print(F"{call:56} = {func(snake_case__ )} -- {timing:.4f} seconds" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(snake_case__ , snake_case__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 609 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
_snake_case : List[str] = np.full((len(snake_case__ ), sequence_length, 2) , snake_case__ )
else:
_snake_case : List[str] = np.full((len(snake_case__ ), sequence_length) , snake_case__ )
for i, tensor in enumerate(snake_case__ ):
if padding_side == "right":
if isinstance(snake_case__ , snake_case__ ):
_snake_case : List[Any] = tensor[:sequence_length]
else:
_snake_case : int = tensor[:sequence_length]
else:
if isinstance(snake_case__ , snake_case__ ):
_snake_case : List[str] = tensor[:sequence_length]
else:
_snake_case : Tuple = tensor[:sequence_length]
return out_tensor.tolist()
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = ord(snake_case__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_snake_case : Dict = unicodedata.category(snake_case__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
lowercase__ = -1_00
lowercase__ = "pt"
def UpperCamelCase_ ( self: Optional[int], a_: Tuple ):
'''simple docstring'''
import torch
_snake_case : Union[str, Any] = """label""" if """label""" in features[0].keys() else """labels"""
_snake_case : Tuple = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_snake_case : int = self.tokenizer.pad(
a_, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="""pt""" if labels is None else None, )
if labels is None:
return batch
_snake_case : Optional[int] = torch.tensor(batch["""entity_ids"""] ).shape[1]
_snake_case : str = self.tokenizer.padding_side
if padding_side == "right":
_snake_case : Tuple = [
list(a_ ) + [self.label_pad_token_id] * (sequence_length - len(a_ )) for label in labels
]
else:
_snake_case : Dict = [
[self.label_pad_token_id] * (sequence_length - len(a_ )) + list(a_ ) for label in labels
]
_snake_case : str = [feature["""ner_tags"""] for feature in features]
_snake_case : Tuple = padding_tensor(a_, -1, a_, a_ )
_snake_case : Any = [feature["""original_entity_spans"""] for feature in features]
_snake_case : Union[str, Any] = padding_tensor(a_, (-1, -1), a_, a_ )
_snake_case : Optional[Any] = {k: torch.tensor(a_, dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 609 | 1 |
def lowercase ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Any ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod
else:
lowercase_ : List[str] = binary_exponentiation(__snake_case , n / 2 , __snake_case )
return (b * b) % mod
# a prime number
__A : List[Any] = 701
__A : str = 1_000_000_000
__A : List[Any] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 720 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__A : str = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def lowercase ( ):
lowercase_ : Optional[Any] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowercase_ : List[Any] = get_sagemaker_input()
else:
lowercase_ : Union[str, Any] = get_cluster_input()
return config
def lowercase ( __snake_case : Any=None ):
if subparsers is not None:
lowercase_ : Any = subparsers.add_parser('''config''' , description=__snake_case )
else:
lowercase_ : str = argparse.ArgumentParser('''Accelerate config command''' , description=__snake_case )
parser.add_argument(
'''--config_file''' , default=__snake_case , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=__snake_case )
return parser
def lowercase ( __snake_case : int ):
lowercase_ : Optional[Any] = get_user_input()
if args.config_file is not None:
lowercase_ : Union[str, Any] = args.config_file
else:
if not os.path.isdir(__snake_case ):
os.makedirs(__snake_case )
lowercase_ : Optional[Any] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(__snake_case )
else:
config.to_yaml_file(__snake_case )
print(F'''accelerate configuration saved at {config_file}''' )
def lowercase ( ):
lowercase_ : List[str] = config_command_parser()
lowercase_ : List[str] = parser.parse_args()
config_command(__snake_case )
if __name__ == "__main__":
main()
| 141 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
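# Illustrative shape check for RegNetConvLayer (not part of the original file):
# padding of kernel_size // 2 keeps odd-size kernels "same"-padded, so only the
# stride changes the spatial resolution.
#
#     layer = RegNetConvLayer(3, 32, kernel_size=3, stride=2)
#     out = layer(torch.randn(1, 3, 224, 224))
#     assert out.shape == (1, 32, 112, 112)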
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
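# RegNetSELayer implements squeeze-and-excitation: global-average-pool each
# channel to a scalar, run a 1x1-conv bottleneck, and rescale the input
# channel-wise with a sigmoid gate. A minimal illustration (hypothetical sizes):
#
#     se = RegNetSELayer(in_channels=64, reduced_channels=16)
#     out = se(torch.randn(1, 64, 7, 7))  # output shape equals input shape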
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
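# Minimal usage sketch for the classes above (illustrative; relies on the
# public `transformers` API and the checkpoint named in the docstring
# constants, and assumes `image` is a PIL image you provide):
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])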
| 325 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
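# The three checks above assume a singly linked list node exposing `val` and
# `next`; no node class is defined in this snippet, so here is a minimal
# stand-in plus a smoke test (names are illustrative, not from the original):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    """Build a linked list from a Python iterable and return its head."""
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    # is_palindrome mutates the list it checks, so build a fresh one per call
    assert is_palindrome(build_list([1, 2, 3, 2, 1]))
    assert is_palindrome_stack(build_list([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))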
| 313 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
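# Illustrative single-process invocation (attribute names mirror the argparse
# flags the distillation scripts pass in; SimpleNamespace stands in for the
# parsed args):
#
#     from types import SimpleNamespace
#
#     params = SimpleNamespace(n_gpu=0, local_rank=-1, seed=42)
#     init_gpu_params(params)  # n_gpu <= 0: sets is_master=True, multi_gpu=False
#     set_seed(params)         # reads params.seed and params.n_gpu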
| 336 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative if deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""

    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
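# Worked intuition for the loop above (illustrative): sigmoid_function(value,
# deriv=True) returns value * (1 - value), the sigmoid derivative expressed in
# terms of the *output*, which is why layer_1 (not the pre-activation) is fed
# back in. Since layer_1 is always in (0, 1), the returned value is bounded:
#
#     >>> 0 <= forward_propagation(32, 100_000) <= 100
#     True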
| 336 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )

    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune the GPT-2 model, filtering training contexts with the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
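# --- Illustrative sketch (added; not part of the original script) ---
# The filtering decision inside `finetune`, shown in isolation: a context is kept
# for backprop only while the secondary learner predicts an information gain
# above `threshold`; after 10 global steps the threshold drops to -1, which
# effectively lets every context through. A hypothetical, minimal stand-in:
#
#     def should_backprop(predicted_q: float, global_step: int, threshold: float) -> bool:
#         if global_step >= 10:
#             threshold = -1  # the filter is fully relaxed after warm-up
#         return predicted_q >= threshold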
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
    main()
| 40 |
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """Print all the words stored below `node`; call with word='' at the root."""
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """This demonstrates the use of the trie class."""
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
    main()
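# --- Usage sketch (added for illustration) ---
# root = TrieNode()
# root.insert_many(["band", "bandana"])
# print_words(root, "")   # prints: band bandana
# root.delete("band")     # "bandana" survives because "band" is only a prefix of it
# assert root.find("bandana") and not root.find("band")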
| 467 | 0 |
def abbr(a: str, b: str) -> bool:
    """
    Check whether string `a` can be abbreviated to string `b` by capitalizing
    some of its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
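# Complexity note (added): the table has (n + 1) * (m + 1) entries, each filled
# at most once, so the check runs in O(n * m) time and space for n = len(a),
# m = len(b).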
| 618 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for the HANS dataset.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
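# --- Hypothetical usage sketch (added; paths are placeholders) ---
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset(
#     data_dir="path/to/hans",  # directory containing heuristics_train_set.txt
#     tokenizer=tokenizer,
#     task="hans",
#     max_seq_length=128,
# )
# print(len(dataset), dataset.get_labels())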
| 618 | 1 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
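# Example invocation (added; the script file name is an assumption):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_roberta_base/model.ckpt \
#       --big_bird_config_file bigbird_roberta_base/config.json \
#       --pytorch_dump_path ./bigbird_pt \
#       --is_trivia_qa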
| 388 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 388 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
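# Example invocation (added; paths are placeholders):
#   python conversion_script.py \
#       --checkpoint_path ldm/model.ckpt --config_path ldm/config.yaml --output_path ./ldm_pipeline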
| 710 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 332 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    """
    Quantize the input model with the settings in `bnb_quantization_config`. If the model is on the meta device, load
    and dispatch the weights according to `device_map`; otherwise quantize the already-loaded model in place.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """
    Replace all `torch.nn.Linear` modules by the corresponding `bnb.nn` quantized layers.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside our model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
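# --- Hypothetical usage sketch (added; not part of the original module) ---
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig
#
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# with init_empty_weights():
#     empty_model = MyModel()  # placeholder model class
# quantized_model = load_and_quantize_model(
#     empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
# )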
| 59 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n    Output class for the scheduler's step function output.\n\n    Args:\n        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n            denoising loop.\n        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n            `pred_original_sample` can be used to preview progress or for guidance.\n    \"\"\"\n\n    prev_sample: torch.FloatTensor\n    pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119))
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 65 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide a number of bytes into a given number of partitions.

    >>> allocation_num(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 65 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: returns 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
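# Truth table (added for reference):
#   input_1 | input_2 | xnor_gate(input_1, input_2)
#   --------+---------+----------------------------
#      0    |    0    |  1
#      0    |    1    |  0
#      1    |    0    |  0
#      1    |    1    |  1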
| 365 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
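# Quick sanity examples for the helpers above (added; illustrative only):
#   clamp(12, 0, 10)                                  -> 10
#   next_divisible(15, 4)                             -> 12  (largest multiple of 4 <= 15)
#   add_overlap_rect((32, 32, 64, 64), 8, (128, 128)) -> (24, 24, 72, 72)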
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 710 |
import string
def atbash_slow(sequence: str) -> str:
    """
    >>> atbash_slow("ABCDEFG")
    'ZYXWVUT'
    """
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """
    >>> atbash("ABCDEFG")
    'ZYXWVUT'
    """
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark both implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
| 207 | 0 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Returns (x, y) with a * x + b * y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Solve x ≡ r1 (mod n1) and x ≡ r2 (mod n2) for coprime n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Returns the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same as above, implemented via modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
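# Worked example (added): find x with x ≡ 1 (mod 5) and x ≡ 3 (mod 7).
# chinese_remainder_theorem(5, 1, 7, 3) == 31, and indeed 31 % 5 == 1 and
# 31 % 7 == 3; the solution is unique modulo 5 * 7 = 35.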
| 222 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ) -> Optional[int]:
for tf_name, hf_name in patterns:
lowerCamelCase : int = k.replace(UpperCamelCase__ , UpperCamelCase__ )
return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    config = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(config)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
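    # Minimal usage sketch (the checkpoint path below is hypothetical, not taken
    # from this file):
    #   python convert_bigbird_pegasus_tf_checkpoint_to_pytorch.py \
    #       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
    #       --save_dir ./bigbird-pegasus-converted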
| 222 | 1 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
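# Quick sanity check (illustrative, not part of the original file): a list of n
# distinct items yields n! tuples, e.g.
#   heaps([1, 2, 3])
#   -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]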
if __name__ == "__main__":
UpperCamelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase_ = [int(item) for item in user_input.split(',')]
print(heaps(arr)) | 714 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True

def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices,
    return that as the default level; otherwise fall back to WARNING.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level

def _get_library_name() -> str:
    return __name__.split(".")[0]

def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())

def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False

def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None

def get_log_levels_dict() -> Dict:
    return log_levels

def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; meant to be used by module authors."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)

def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()

def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)

def set_verbosity_info():
    return set_verbosity(INFO)

def set_verbosity_warning():
    return set_verbosity(WARNING)

def set_verbosity_debug():
    return set_verbosity(DEBUG)

def set_verbosity_error():
    return set_verbosity(ERROR)
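# Illustrative usage sketch (assumes this module is exposed as
# `transformers.utils.logging`, as in the upstream library):
#   from transformers.utils import logging as hf_logging
#   hf_logging.set_verbosity_info()
#   logger = hf_logging.get_logger("transformers")
#   logger.info("INFO-level messages are now visible")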
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)

def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)

def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)

def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)

def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False

def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True

def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)

def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)

def warning_advice(self, *args, **kwargs):
    """
    Behaves like `logger.warning()`, but is silenced when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)

logging.Logger.warning_advice = warning_advice

@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Behaves like `logger.warning()`, but emits each distinct warning only once."""
    self.warning(*args, **kwargs)

logging.Logger.warning_once = warning_once

class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return

class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()

tqdm = _tqdm_cls()

def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)

def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()

def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 210 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def lowercase_ ( self ) -> str:
__lowerCamelCase : List[Any] = '<s>'
__lowerCamelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 10_54 )
def lowercase_ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[Any] = MBartaaTokenizer(SCREAMING_SNAKE_CASE_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def lowercase_ ( self ) -> int:
# fmt: off
__lowerCamelCase : str = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def lowercase_ ( self ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCamelCase : List[Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = tempfile.mkdtemp()
__lowerCamelCase : Dict = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__lowerCamelCase : Union[str, Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
__lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase : str = tempfile.mkdtemp()
__lowerCamelCase : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
__lowerCamelCase : Tuple = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase : int = tempfile.mkdtemp()
__lowerCamelCase : str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase : Tuple = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] = 'facebook/mbart-large-50-one-to-many-mmt'
lowerCamelCase : Dict = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCamelCase : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCamelCase : List[Any] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def lowercase_ ( cls ) -> str:
__lowerCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
__lowerCamelCase : List[Any] = 1
return cls
def lowercase_ ( self ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Dict:
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
__lowerCamelCase : List[str] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__lowerCamelCase : Union[str, Any] = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Optional[Any] = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = 10
__lowerCamelCase : Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[0] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : str = tempfile.mkdtemp()
__lowerCamelCase : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = MBartaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
__lowerCamelCase : Any = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__lowerCamelCase : Tuple = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowerCamelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Dict = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
__lowerCamelCase : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
__lowerCamelCase : Optional[Any] = targets['input_ids']
__lowerCamelCase : List[str] = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase_ ( self ) -> int:
__lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_00_04, 62, 30_34, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_00_01,
} , )
| 13 |
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"

def update_custom_js(version: str):
    """Update the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
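    # For reference (the version number here is illustrative, not from this file),
    # the rewritten line in custom.js ends up looking like:
    #   const stableVersion = "v4.28.0"
    # and the version table gains an entry:  "v4.28.0": "v4.28.0",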
| 13 | 1 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Forwards `images` to the image processor and `audio` to the feature extractor."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
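# Hedged usage sketch (assumes the TVLT processor is exposed via `transformers`
# as upstream; checkpoint name and input variables are illustrative):
#   from transformers import TvltProcessor
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)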
| 702 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
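# Hedged usage sketch (assumes the standard `transformers.pipeline` factory;
# the model name is illustrative):
#   from transformers import pipeline
#   extractor = pipeline(task="feature-extraction", model="bert-base-uncased")
#   features = extractor("This is a test")  # nested list, shape (1, seq_len, hidden)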
| 354 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
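# Illustrative check of the Vector API above (not in the original file):
#   v = Vector([1.0, 2.0, 2.0])
#   v.euclidean_length()     -> 3.0
#   v * Vector([1, 0, 0])    -> 1.0   (dot product)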
def zero_vector(dimension: int) -> Vector:
    """Return a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)

def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)

def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y (the BLAS "axpy" operation)."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y

def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A pure-Python matrix with basic linear-algebra operations."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)

def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)

def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 45 |
import math
def res(x, y):
    if 0 not in (x, y):
        # Compare via logarithms: log10(x ** y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")
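# Worked example (illustrative): res(2, 10) = 10 * log10(2) ~= 3.0103, while
# res(10, 3) = 3 * log10(10) = 3.0, so 2**10 (= 1024) beats 10**3 (= 1000).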
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 45 | 1 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config

def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
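    # Minimal usage sketch (the checkpoint path is hypothetical):
    #   python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path /path/to/mobilenet_v1_1.0_224.ckpt \
    #       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224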
| 706 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: the output is 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)
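# Quick check (illustrative): nor_gate(0, 0) -> 1; any other combination -> 0.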
def main() -> None:
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(F'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(F'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(F'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 394 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2

class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
def snake_case__( self: List[str] ):
lowercase__ : Dict = 'y = add_two(x)'
lowercase__ : Any = {'x': 3}
lowercase__ : Optional[int] = evaluate(_UpperCamelCase, {'add_two': add_two}, state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowercase__ : List[str] = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__( self: Optional[int] ):
lowercase__ : Any = 'x = 3'
lowercase__ : str = {}
lowercase__ : str = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase, {'x': 3} )
def snake_case__( self: Optional[Any] ):
lowercase__ : str = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowercase__ : str = {'x': 3}
lowercase__ : Dict = evaluate(_UpperCamelCase, {'add_two': add_two}, state=_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'y': 5} )
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def snake_case__( self: Any ):
lowercase__ : Any = 'x = 3\ny = 5'
lowercase__ : Any = {}
lowercase__ : Union[str, Any] = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'y': 5} )
def snake_case__( self: str ):
lowercase__ : str = 'text = f\'This is x: {x}.\''
lowercase__ : Optional[int] = {'x': 3}
lowercase__ : Any = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'text': 'This is x: 3.'} )
def snake_case__( self: Any ):
lowercase__ : Optional[int] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowercase__ : List[Any] = {'x': 3}
lowercase__ : Tuple = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'y': 2} )
lowercase__ : int = {'x': 8}
lowercase__ : List[str] = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase, {'x': 8, 'y': 5} )
def snake_case__( self: str ):
lowercase__ : List[str] = 'test_list = [x, add_two(x)]'
lowercase__ : Any = {'x': 3}
lowercase__ : str = evaluate(_UpperCamelCase, {'add_two': add_two}, state=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase, [3, 5] )
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'test_list': [3, 5]} )
def snake_case__( self: Optional[int] ):
lowercase__ : str = 'y = x'
lowercase__ : str = {'x': 3}
lowercase__ : Dict = evaluate(_UpperCamelCase, {}, state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'y': 3} )
def snake_case__( self: Optional[int] ):
lowercase__ : Optional[Any] = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowercase__ : Optional[Any] = {'x': 3}
lowercase__ : List[Any] = evaluate(_UpperCamelCase, {'add_two': add_two}, state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'test_list': [3, 5]} )
lowercase__ : Dict = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowercase__ : int = {'x': 3}
lowercase__ : Tuple = evaluate(_UpperCamelCase, {'add_two': add_two}, state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase, {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def snake_case__( self: Any ):
lowercase__ : List[str] = 'x = 0\nfor i in range(3):\n x = i'
lowercase__ : Any = {}
lowercase__ : List[Any] = evaluate(_UpperCamelCase, {'range': range}, state=_UpperCamelCase )
assert result == 2
self.assertDictEqual(_UpperCamelCase, {'x': 2, 'i': 2} )
| 266 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)

def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K))."""
    return round(float((moles * 0.0821 * temperature) / (volume)))

def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))

def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))
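# Worked example (illustrative): moles_to_pressure(0.82, 3, 300)
#   = round(3 * 0.0821 * 300 / 0.82) = round(90.11) = 90 atm.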
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__a: Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
@dataclass(frozen=a__ )
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase = False , ) -> Optional[Any]:
lowercase__ : str = hans_processors[task]()
lowercase__ : Any = os.path.join(
__lowerCAmelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__lowerCAmelCase ) , __lowerCAmelCase , ) , )
lowercase__ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ : int = cached_features_file + '''.lock'''
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowercase__ : Optional[Any] = torch.load(__lowerCAmelCase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowercase__ : List[Any] = (
processor.get_dev_examples(__lowerCAmelCase ) if evaluate else processor.get_train_examples(__lowerCAmelCase )
)
logger.info('''Training examples: %s''' , len(__lowerCAmelCase ) )
lowercase__ : Union[str, Any] = hans_convert_examples_to_features(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
logger.info('''Saving features into cached file %s''' , __lowerCAmelCase )
torch.save(self.features , __lowerCAmelCase )
def __len__( self ) -> Optional[Any]:
return len(self.features )
def __getitem__( self , __lowerCAmelCase ) -> InputFeatures:
return self.features[i]
def _lowerCAmelCase( self ) -> Any:
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 128 , __lowerCAmelCase=False , __lowerCAmelCase = False , ) -> Optional[int]:
lowercase__ : int = hans_processors[task]()
lowercase__ : int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
lowercase__ : Union[str, Any] = processor.get_dev_examples(__lowerCAmelCase ) if evaluate else processor.get_train_examples(__lowerCAmelCase )
lowercase__ : Union[str, Any] = hans_convert_examples_to_features(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase__ : Union[str, Any] = tf.data.Dataset.from_generator(
__lowerCAmelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase( self ) -> Any:
return self.dataset
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , __lowerCAmelCase ) -> InputFeatures:
return self.features[i]
def _lowerCAmelCase( self ) -> Optional[Any]:
return self.label_list
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
return self._create_examples(self._read_tsv(os.path.join(__lowerCAmelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
return self._create_examples(self._read_tsv(os.path.join(__lowerCAmelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _lowerCAmelCase( self ) -> Dict:
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Dict = []
for i, line in enumerate(__lowerCAmelCase ):
if i == 0:
continue
lowercase__ : Optional[int] = '''%s-%s''' % (set_type, line[0])
lowercase__ : Dict = line[5]
lowercase__ : Optional[int] = line[6]
lowercase__ : Optional[int] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
lowercase__ : str = line[0]
examples.append(InputExample(guid=__lowerCAmelCase , text_a=__lowerCAmelCase , text_b=__lowerCAmelCase , label=__lowerCAmelCase , pairID=__lowerCAmelCase ) )
return examples
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
lowercase__ : Tuple = {label: i for i, label in enumerate(UpperCAmelCase )}
lowercase__ : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
lowercase__ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , )
lowercase__ : List[str] = label_map[example.label] if example.label in label_map else 0
lowercase__ : str = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
__a: Optional[Any] = {
"""hans""": 3,
}
__a: Dict = {
"""hans""": HansProcessor,
}
| 428 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index):
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed):
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed):
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_highway = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_highway[index] = speed
        highway.append(real_next_highway)
    return highway
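# Minimal usage sketch of the cellular-automaton traffic model (illustrative;
# output varies when random_speed/random_frequency are enabled):
#
#   highway = construct_highway(number_of_cells=21, frequency=3, initial_speed=1)
#   history = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
#   for state in history:
#       print(state)  # -1 marks an empty cell, any other value is a car's speed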
if __name__ == "__main__":
import doctest
doctest.testmod()
| 428 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
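# Minimal usage sketch (illustrative):
#
#   config = PegasusConfig(encoder_layers=6, decoder_layers=6, d_model=512)
#   print(config.hidden_size)          # 512, resolved through the `hidden_size` property
#   print(config.num_attention_heads)  # falls back to encoder_attention_heads (16)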
| 415 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
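# Minimal inference sketch outside the test harness (illustrative; assumes the
# public checkpoint "facebook/regnet-y-040" — any TF RegNet checkpoint works):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs, training=False).logits
#   print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id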
| 415 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
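# How the lazy module behaves from the consumer side (illustrative):
#
#   from transformers.models.x_clip import XCLIPConfig  # config import is cheap
#   from transformers.models.x_clip import XCLIPModel   # the torch-backed module is
#                                                       # only loaded on first access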
| 701 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
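# Quick sanity check of the renaming logic (illustrative):
#
#   assert rename_key("scratch.layer1_rn.weight") == "neck.convs.0.weight"
#   assert rename_key("scratch.refinenet4.out_conv.bias") == "neck.fusion_stage.layers.0.projection.bias"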
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
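# The slicing above just splits a fused (3*hidden, hidden) projection into three
# (hidden, hidden) blocks. A tiny illustration of the same pattern:
#
#   hidden = 4
#   fused = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#   q, k, v = fused[:hidden, :], fused[hidden : 2 * hidden, :], fused[-hidden:, :]
#   assert torch.equal(torch.cat([q, k, v]), fused)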
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
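# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large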
| 180 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
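# Loading the exported decoder afterwards (illustrative; requires `onnxruntime`,
# and assumes the standard Stable Diffusion VAE with 4 latent channels):
#
#   import numpy as np
#   import onnxruntime as ort
#
#   session = ort.InferenceSession("output/vae_decoder/model.onnx")
#   latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
#   (sample,) = session.run(None, {"latent_sample": latent})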
| 269 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def a ( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , ):
"""simple docstring"""
__UpperCamelCase : str = True
__UpperCamelCase : List[str] = FalconModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Dict = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
__UpperCamelCase : Any = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )
__UpperCamelCase : Any = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : int , ):
"""simple docstring"""
__UpperCamelCase : int = FalconForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , ):
"""simple docstring"""
__UpperCamelCase : List[str] = True
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Dict = FalconForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
__UpperCamelCase : Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ , )
__UpperCamelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase : Optional[int] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
__UpperCamelCase : Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
__UpperCamelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def a ( self : int ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = 3
__UpperCamelCase : Optional[int] = input_dict["""input_ids"""]
__UpperCamelCase : List[Any] = input_ids.ne(1 ).to(lowerCamelCase__ )
__UpperCamelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : Union[str, Any] = FalconForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a ( self : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : List[Any] = """single_label_classification"""
__UpperCamelCase : Optional[Any] = input_dict["""input_ids"""]
__UpperCamelCase : List[str] = input_ids.ne(1 ).to(lowerCamelCase__ )
__UpperCamelCase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : List[str] = FalconForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : List[str] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a ( self : List[Any] ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Any = input_dict["""input_ids"""]
__UpperCamelCase : List[str] = FalconForCausalLM(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Optional[Any] = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
__UpperCamelCase : Optional[int] = input_ids.shape[0]
__UpperCamelCase : Dict = model._convert_to_rw_cache(result.past_key_values )
__UpperCamelCase : Optional[int] = model._convert_cache_to_standard_format(lowerCamelCase__ , lowerCamelCase__ )
for layer in range(len(lowerCamelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def a ( self : Tuple ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Optional[Any] = 3
__UpperCamelCase : Optional[Any] = """multi_label_classification"""
__UpperCamelCase : Dict = input_dict["""input_ids"""]
__UpperCamelCase : List[Any] = input_ids.ne(1 ).to(lowerCamelCase__ )
__UpperCamelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase : Tuple = FalconForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : List[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a ( self : int ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCamelCase__ , """use_cache""" ):
return
__UpperCamelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
if "use_cache" not in inputs:
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Any = model(**lowerCamelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__UpperCamelCase : List[str] = (
getattr(lowerCamelCase__ , """decoder_layers""" , lowerCamelCase__ )
or getattr(lowerCamelCase__ , """num_decoder_layers""" , lowerCamelCase__ )
or config.num_hidden_layers
)
__UpperCamelCase : Optional[Any] = getattr(lowerCamelCase__ , """num_kv_heads""" , config.num_attention_heads )
__UpperCamelCase : str = getattr(lowerCamelCase__ , """d_model""" , config.hidden_size )
__UpperCamelCase : List[str] = embed_dim // num_attention_heads
__UpperCamelCase : List[Any] = outputs["""past_key_values"""]
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Dict = inputs["""input_ids"""].shape
for i in range(lowerCamelCase__ ):
if config.new_decoder_architecture:
__UpperCamelCase : List[Any] = config.num_attention_heads
elif config.multi_query:
__UpperCamelCase : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : int = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
__UpperCamelCase : Optional[int] = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(lowerCamelCase__ )
__UpperCamelCase : str = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCamelCase__ )
__UpperCamelCase : Tuple = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
__UpperCamelCase : Tuple = model.generate(**lowerCamelCase__ , do_sample=lowerCamelCase__ , max_new_tokens=19 )
__UpperCamelCase : Dict = tokenizer.batch_decode(lowerCamelCase__ )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : Dict ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__UpperCamelCase : Dict = AutoTokenizer.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = FalconForCausalLM.from_pretrained(lowerCamelCase__ )
model.eval()
model.to(lowerCamelCase__ )
__UpperCamelCase : Dict = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCamelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCamelCase__ , do_sample=lowerCamelCase__ , max_new_tokens=4 )
model.generate(**lowerCamelCase__ , do_sample=lowerCamelCase__ , max_new_tokens=4 )
model.generate(**lowerCamelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def a ( self : Tuple ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : List[str] = FalconForCausalLM.from_pretrained(lowerCamelCase__ )
model.eval()
model.to(device=lowerCamelCase__ )
__UpperCamelCase : Optional[int] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCamelCase__ )
# Test results are the same with and without cache
__UpperCamelCase : Dict = model.generate(**lowerCamelCase__ , do_sample=lowerCamelCase__ , max_new_tokens=20 , use_cache=lowerCamelCase__ )
__UpperCamelCase : int = model.generate(**lowerCamelCase__ , do_sample=lowerCamelCase__ , max_new_tokens=20 , use_cache=lowerCamelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
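# Minimal generation sketch mirroring the slow test above (illustrative):
#
#   tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
#   model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
#   inputs = tokenizer("My favorite food is", return_tensors="pt")
#   output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
#   print(tokenizer.batch_decode(output_ids)[0])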
| 269 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
a_ : Union[str, Any] = self.image_processor_tester.prepare_dummy_image()
a_ : Tuple = self.image_processing_class(**self.image_processor_dict )
a_ : int = 2048
a_ : Tuple = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# Initialize image_processor
a_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : Dict = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : int = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : List[Any] = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# Initialize image_processor
a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
a_ : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
a_ : Any = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : str = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
a_ : str = '''Hello'''
a_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : str = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
# Initialize image_processor
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Union[str, Any] = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input: RGBA inputs are converted to RGB, hence `num_channels - 1`
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
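# Note on `expected_hidden_dim` used throughout these tests: each flattened
# Pix2Struct patch stores patch_height * patch_width * num_channels pixel
# values plus two extra entries for the patch's row and column indices, hence
# the trailing "+ 2".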
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
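# Minimal standalone sampling sketch mirroring the first integration test above
# (illustrative only; same checkpoint id as the test, device choice assumed):
#
#     unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
#     ddim = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to("cuda")
#     image = ddim(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]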
| 666 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
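# Minimal usage sketch (the checkpoint id below is an assumption for
# illustration, not part of this module):
#
#     processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     batch = processor(audio=waveform, text="HELLO WORLD")  # adds `labels` from the tokenizer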
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
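# Runtime behaviour of the lazy-import pattern above (illustrative; the module
# path is assumed from the file's contents):
#
#     import transformers.models.resnet as resnet  # cheap: no torch/tf/flax imported yet
#     resnet.ResNetModel                           # first attribute access triggers the real import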
| 364 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
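# How these placeholders behave (illustrative): instantiating one, or calling
# `from_config` / `from_pretrained` on it, raises an ImportError produced by
# `requires_backends` telling the user to install flax; `DummyObject` makes
# plain attribute access on the class itself fail the same way.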
| 706 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
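# Example invocation (script name and all paths are illustrative placeholders):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt-best \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin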
| 600 | 0 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
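# Sanity check (illustrative): for n = 12 the only valid triplet is (3, 4, 5),
# so solution(12) == 3 * 4 * 5 == 60.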
if __name__ == "__main__":
print(f'''{solution() = }''')
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 657 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
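# Illustrative behaviour (assumed inputs):
#   find_backend("    if not is_torch_available():")  ->  "torch"
#   find_backend("x = 1")                              ->  None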
def parse_init(init_file):
    """Read an __init__.py file and parse, per backend, the objects defined in
    `_import_structure` and under `TYPE_CHECKING`."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init parsed by `parse_init` and list the differences."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
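# Illustrative behaviour (assumed toy inputs):
#   analyze_results({"none": ["A"]}, {"none": ["A", "B"]})
#   -> ["Differences for base imports:",
#       "  B in TYPE_HINT but not in _import_structure."]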
def check_all_inits():
    """Check every __init__.py under PATH_TO_TRANSFORMERS and raise on mismatches."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """List all submodules of the transformers package found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every on-disk submodule is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 700 |
from __future__ import annotations
def lowerCAmelCase__ ( lowerCamelCase_ : list[int]) -> bool:
    """Return True if every element of the list is unique.

    >>> lowerCAmelCase__([1, 2, 3])
    True
    >>> lowerCAmelCase__([1, 2, 2])
    False
    """
    return len(set(lowerCamelCase_)) == len(lowerCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
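# Illustrative behaviour:
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}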
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair Encoding)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
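# Minimal usage sketch (vocab/merges file paths are placeholders; the files are
# not shipped with this module):
#
#     tokenizer = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#     ids = tokenizer("sample text")["input_ids"]
#     text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))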
| 675 |
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
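# Example round-trip (illustrative, traced by hand against the code above):
#
#     cipher_map = create_cipher_map("Goodbye!!")
#     encipher("Hello World!!", cipher_map)   # -> 'CYJJM VMQJB!!'
#     decipher("CYJJM VMQJB!!", cipher_map)   # -> 'HELLO WORLD!!'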
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_common_attributes(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 721 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert a byte count to whole megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
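# Minimal usage sketch of the tracker above (illustrative):
#
#     with TorchTracemalloc() as tracker:
#         ...  # run some CUDA work
#     print(tracker.used, tracker.peaked)  # MB delta and MB peak during the block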
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
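
# A hypothetical launch command for this script (the file name is illustrative
# and `accelerate` must be configured beforehand via `accelerate config`):
#
#     accelerate launch peak_memory_tracking.py \
#         --model_name_or_path bert-base-cased --num_epochs 1 --output_dir .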
| 561 | 0 |
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """Computes the KMP prefix (failure) function: for each index i, the length
    of the longest proper prefix of input_string[: i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the maximum prefix-function value over the whole string."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
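
# A quick usage sketch (hypothetical REPL session; outputs verified by hand):
#
#     >>> prefix_function("aabcdaabc")
#     [0, 1, 0, 0, 0, 1, 2, 3, 4]
#     >>> longest_prefix("aabcdaabc")
#     4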
| 404 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a Deformable DETR model.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
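
# A minimal usage sketch (hypothetical values; assumes `transformers` is
# installed so the ResNet backbone config is registered in CONFIG_MAPPING):
#
#     config = DeformableDetrConfig(num_queries=100)
#     print(config.hidden_size)  # 256 -- aliased to `d_model` via `attribute_map`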
| 404 | 1 |
def count_inversions_bf(arr):
    """Counts inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions with a divide-and-conquer merge sort in O(n log n).

    Returns the sorted array together with the inversion count."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_all, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_all, num_inversions


def _count_cross_inversions(p, q):
    """Merges two sorted lists and counts inversions across them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 458 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
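
# Hypothetical CLI invocation (paths are placeholders; the data dir must hold
# {train,val,test}.{source,target} files):
#
#     python pack_dataset.py --tok_name facebook/bart-large-cnn \
#         --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed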
| 458 | 1 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass
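
    # A hypothetical follow-up: persist the collected rollout for offline
    # rendering (the numpy import is assumed, it is not part of the script):
    #
    #     import numpy as np
    #     np.save("rollout.npy", np.stack(rollout))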
print(f"Total reward: {total_reward}") | 557 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 34 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolates the decimal part of ``number``, rounded to ``digit_amount``
    digits (all decimals when ``digit_amount`` is 0)."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 711 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43

            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
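
# Hypothetical invocation (checkpoint and dictionary paths are placeholders):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path ./unispeech.pt --dict_path ./dict.ltr.txt \
#         --pytorch_dump_folder_path ./unispeech-hf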
| 388 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
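
# A hypothetical illustration of what the lazy structure buys: importing the
# package stays cheap, and heavy submodules load on first attribute access.
#
#     from transformers.models import opt
#     model_cls = opt.OPTModel  # the real torch-backed module is imported here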
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_validation():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 653 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2) | 266 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights to our BiT structure.
    """
    # define default BiT configuration
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
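
# Hypothetical invocation (any BiT checkpoint name known to timm should work):
#
#     python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#         --pytorch_dump_folder_path ./bit-50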
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 266 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 79 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
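
# A minimal sketch of how these options surface to users: `load_dataset`
# forwards extra keyword arguments into this CsvConfig (file path hypothetical):
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1)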
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise | 91 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
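
# A minimal usage sketch (hypothetical; requires an active SparkSession):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, streaming=False).read()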
| 631 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log noise before any TF import
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 631 | 1 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Builds, from a whitespace-separated edge-list file, a dict mapping each
    node to the list of [neighbour, distance] pairs reachable from it."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour used as the starting point of the search."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Generates every tour obtained by swapping two interior nodes of `solution`,
    each annotated with its total distance, sorted by that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
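# Example invocation, assuming an edge-list file with one "node_a node_b distance"
# triple per line (the file name is illustrative):
#   python tabu_search.py -f tsp_edges.txt -i 100 -s 5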
| 107 |
"""Markov chain random walk over a graph with weighted transitions."""
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Graph whose edges carry transition probabilities for a random walk."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Sample the next node proportionally to the outgoing probabilities.
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """Run a random walk for ``steps`` transitions and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
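# Usage sketch (hypothetical two-state chain; visit counts vary run to run):
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 5000))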
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
def merge_sort(collection):
    """Sort by repeatedly peeling the minimum to the front and the maximum to the back.

    Note: despite the name (kept from the original), this is a min/max selection
    scheme rather than a classic divide-and-conquer merge sort.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
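# Worked example: merge_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5]
# (0 and 5 are peeled off first, then 2 and 3, leaving the middle 2 in place).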
| 705 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[Any] , __A: List[Any] , __A: Optional[int]=13 , __A: Tuple=7 , __A: Optional[Any]=True , __A: Tuple=True , __A: str=99 , __A: int=32 , __A: Union[str, Any]=5 , __A: Optional[Any]=4 , __A: Any=37 , __A: Union[str, Any]="gelu" , __A: int=0.1 , __A: Optional[Any]=0.1 , __A: Optional[int]=50 , __A: str=0.0_2 , __A: Dict=True , __A: Dict=None , ):
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = initializer_range
a__ = use_labels
a__ = scope
def lowercase ( self: List[Any] ):
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase ( self: int ):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__A , initializer_range=self.initializer_range , )
def lowercase ( self: List[str] ):
'''simple docstring'''
(
(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,
) = self.prepare_config_and_inputs()
a__ = True
a__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self: str , __A: List[str] , __A: Optional[int] , __A: Union[str, Any] , __A: Optional[int] , **__A: Union[str, Any] , ):
'''simple docstring'''
a__ = BertGenerationEncoder(config=__A )
model.to(__A )
model.eval()
a__ = model(__A , attention_mask=__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: Optional[Any] , __A: Any , __A: int , __A: Optional[int] , __A: int , __A: List[Any] , __A: Any , **__A: List[str] , ):
'''simple docstring'''
a__ = True
a__ = BertGenerationEncoder(config=__A )
model.to(__A )
model.eval()
a__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
a__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: Dict , __A: Tuple , __A: str , __A: str , __A: Any , __A: int , __A: Any , **__A: Dict , ):
'''simple docstring'''
a__ = True
a__ = True
a__ = BertGenerationDecoder(config=__A ).to(__A ).eval()
# first forward pass
a__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , use_cache=__A , )
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] , dim=-1 )
a__ = torch.cat([input_mask, next_mask] , dim=-1 )
a__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_hidden_states=__A , )['''hidden_states'''][0]
a__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )['''hidden_states'''][0]
# select random slice
a__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-3 ) )
def lowercase ( self: Optional[Any] , __A: List[Any] , __A: List[Any] , __A: Union[str, Any] , __A: Optional[Any] , *__A: Optional[int] , ):
'''simple docstring'''
a__ = BertGenerationDecoder(__A )
model.to(__A )
model.eval()
a__ = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: Dict ):
'''simple docstring'''
a__ ,a__ ,a__ ,a__ = self.prepare_config_and_inputs()
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE =(BertGenerationDecoder,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE =(
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowercase ( self: Optional[int] ):
'''simple docstring'''
a__ = BertGenerationEncoderTester(self )
a__ = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowercase ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase ( self: Union[str, Any] ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowercase ( self: Any ):
'''simple docstring'''
a__ ,a__ ,a__ ,a__ = self.model_tester.prepare_config_and_inputs()
a__ = '''bert'''
self.model_tester.create_and_check_model(__A , __A , __A , __A )
def lowercase ( self: int ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def lowercase ( self: List[str] ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def lowercase ( self: Union[str, Any] ):
'''simple docstring'''
(
(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
a__ = None
self.model_tester.create_and_check_model_as_decoder(
__A , __A , __A , __A , __A , __A , )
def lowercase ( self: str ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__A )
@slow
def lowercase ( self: Dict ):
'''simple docstring'''
a__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(__A )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self: Optional[Any] ):
'''simple docstring'''
a__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
a__ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
a__ = model(__A )[0]
a__ = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , __A )
a__ = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1e-4 ) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self: List[Any] ):
'''simple docstring'''
a__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
a__ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
a__ = model(__A )[0]
a__ = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , __A )
a__ = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1e-4 ) )
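# These tests are collected by pytest; the repository-relative path below is assumed:
#   pytest tests/models/bert_generation/test_modeling_bert_generation.py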
| 200 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Create a mapping from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` whose arguments are derived from dataclass fields."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        if "formatter_class" not in kwargs:
            # Keep the field defaults visible in --help output.
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args, allow_extra_keys=False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file, allow_extra_keys=False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
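# Minimal usage sketch (hypothetical dataclass; the names are illustrative only):
#
#   @dataclasses.dataclass
#   class TrainingArguments:
#       learning_rate: float = HfArg(default=1e-4, help="Peak learning rate.")
#       do_eval: bool = HfArg(default=False, help="Whether to run evaluation.")
#
#   parser = HfArgumentParser(TrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses()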
| 188 |
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# Restored assumption: the original bare `= False` assignment disabled TF32 matmuls
# for deterministic test results.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 188 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCAmelCase__ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
UpperCAmelCase__ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
UpperCAmelCase__ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 700 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 362 | 0 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum`` (classic subset-sum DP)."""
    arr_len = len(arr)

    # subset[i][j] is True when some subset of the first i elements sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
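# Worked examples:
#   is_sum_subset([2, 4, 6, 8], 5)  -> False  (all elements are even)
#   is_sum_subset([2, 4, 6, 8], 14) -> True   (2 + 4 + 8)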
| 423 |
from typing import Dict, List, Optional, Union
import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the accepted video inputs to a batch of lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


# The original class name was lost in the dump; `VideoMAEImageProcessor` is assumed
# from the defaults (video batching, 224 shortest edge / 224 crop, ImageNet stats).
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 423 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCAmelCase_ ( ):
"""simple docstring"""
__lowercase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=UpperCamelCase__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=UpperCamelCase__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=UpperCamelCase__ )
return parser.parse_args()
def lowerCAmelCase_ ( ):
"""simple docstring"""
__lowercase = parse_args()
# Import training_script as a module.
__lowercase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowercase = script_fpath.stem
__lowercase = importlib.import_module(UpperCamelCase__ )
# Patch sys.argv
__lowercase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
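# Example invocation (the script and flag names are taken from the parser above;
# the training script itself is hypothetical and must define an `_mp_fn`):
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5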
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ ={
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ =[
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
UpperCAmelCase__ =[
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
UpperCAmelCase__ =[
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
UpperCAmelCase__ =[
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 442 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCAmelCase_ : List[str] = ['''text''', '''image''', '''audio''']
def _lowerCAmelCase(a : List[str] ) -> List[str]:
_SCREAMING_SNAKE_CASE =[]
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(a , a ):
inputs.append(create_inputs(a ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def _lowerCAmelCase(a : List ) -> Dict:
_SCREAMING_SNAKE_CASE =[]
for output in outputs:
if isinstance(a , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(a , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(a , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __UpperCAmelCase :
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
_SCREAMING_SNAKE_CASE =self.tool.inputs
for _input in inputs:
if isinstance(_input , _A ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
_SCREAMING_SNAKE_CASE =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =create_inputs(self.tool.inputs )
_SCREAMING_SNAKE_CASE =self.tool(*_A )
# There is a single output
if len(self.tool.outputs ) == 1:
_SCREAMING_SNAKE_CASE =[outputs]
self.assertListEqual(output_types(_A ) , self.tool.outputs )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =create_inputs(self.tool.inputs )
_SCREAMING_SNAKE_CASE =self.tool(*_A )
if not isinstance(_A , _A ):
_SCREAMING_SNAKE_CASE =[outputs]
self.assertEqual(len(_A ) , len(self.tool.outputs ) )
for output, output_type in zip(_A , self.tool.outputs ):
_SCREAMING_SNAKE_CASE =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_A , _A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =create_inputs(self.tool.inputs )
_SCREAMING_SNAKE_CASE =[]
for _input, input_type in zip(_A , self.tool.inputs ):
if isinstance(_A , _A ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
_SCREAMING_SNAKE_CASE =self.tool(*_A )
if not isinstance(_A , _A ):
_SCREAMING_SNAKE_CASE =[outputs]
self.assertEqual(len(_A ) , len(self.tool.outputs ) )
| 255 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =(3_2, 3_2)
_SCREAMING_SNAKE_CASE =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_A )
return image
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(_A )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
def extract(*_A , **_A ):
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =torch.ones([0] )
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
self.pixel_values.to(_A )
return self
return Out()
return extract
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.dummy_cond_unet
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_A )
_SCREAMING_SNAKE_CASE =self.dummy_vae
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_SCREAMING_SNAKE_CASE =7_7
_SCREAMING_SNAKE_CASE =self.dummy_image.to(_A )
_SCREAMING_SNAKE_CASE =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline(
unet=_A , scheduler=_A , vae=_A , text_encoder=_A , tokenizer=_A , safety_checker=_A , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_A )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.Generator(device=_A ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_A , )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =torch.Generator(device=_A ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_A , return_dict=_A , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_SCREAMING_SNAKE_CASE =np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 255 | 1 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
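# Worked example of the recurrence above: for [1, 2, 3] the best non-adjacent
# choice is 1 + 3 = 4 (the adjacent 2 is skipped), so
# maximum_non_adjacent_sum([1, 2, 3]) returns 4.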
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Count distinct terms in the sequence a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    current_pow = 0
    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
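# Sanity check: solution(5) counts the distinct values of a**b for 2 <= a, b <= 5.
# There are 16 pairs but 2**4 and 4**2 both give 16, so the answer is 15.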
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 331 | 0 |
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including ``num`` via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
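# Quick check: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7]; note the marking
# loop above starts at p * p and advances in steps of p, not of num.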
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 207 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
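    # Attribute access on this module now goes through _LazyModule, which resolves the
    # matching import from _import_structure on first use (the usual transformers pattern).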
| 391 | 0 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    quantum_register = qiskit.QuantumRegister(4, "qr")
    classical_register = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
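# For the default inputs (1, 1, 1) the adder computes 1 + 1 + 1 = 3 = 0b11, so all
# 1000 shots should land on the bitstring "11" (carry-out 1, sum 1).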
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 254 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
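# Usage sketch (the feature spec and path below are illustrative, not from the source):
#     features = datasets.Features({"text": datasets.Value("string")})
#     dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)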
| 254 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 245 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
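# Illustrative call (values chosen here for demonstration, not taken from the source):
#     coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#     constant = np.array([[2.0], [-6.0], [-4.0]])
#     jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)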
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 1 |
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first ``n`` naturals."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
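# Sanity check: for n = 10 the square of the sum is 55**2 = 3025 and the sum of the
# squares is 385, so solution(10) returns 3025 - 385 = 2640.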
if __name__ == "__main__":
print(f'{solution() = }')
| 711 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])
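# Example run: solve(["5", "6", "9", "*", "+"]) evaluates 5 + 6 * 9 and returns 59,
# printing one table row per push/pop along the way.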
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print('''\n\tResult = ''', solve(Postfix))
| 605 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 689 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
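# Worked example: term_frequency("to", "To be, or not to be") strips punctuation,
# tokenizes to ["To", "be", "or", "not", "to", "be"] and returns 2.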
| 452 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 59 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 59 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )

            assert np.abs((expected_image - image).max()) < 1e-1
| 324 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 324 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` via the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
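# The update above is the secant iteration
#     x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n));
# for f(x) = x^3 - 2x - 5 with starting points 3 and 3.5 it converges to the
# real root near 2.0946.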
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 613 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 613 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
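    # Illustrative extra call (hypothetical numbers): three forecasts vote on one actual
    # value; two of the three land within 0.1 of it, so the day is judged safe.
    print(data_safety_checker([0.30, 0.34, 0.29], 0.31))  # True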
| 243 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Tuple:
'''simple docstring'''
__snake_case : Any = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__snake_case : int = []
for i in range(self.model_tester.batch_size ):
__snake_case : Dict = {}
__snake_case : int = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCAmelCase , dtype=torch.long )
__snake_case : Union[str, Any] = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCAmelCase , dtype=torch.float )
labels.append(UpperCAmelCase )
__snake_case : Any = labels
return inputs_dict
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = YolosModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(UpperCAmelCase )
__snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = True
# in YOLOS, the seq_len is different
__snake_case : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = True
__snake_case : Optional[Any] = False
__snake_case : int = True
__snake_case : Any = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__snake_case : int = outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : str = True
__snake_case : Union[str, Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__snake_case : Tuple = outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__snake_case : str = len(UpperCAmelCase )
# Check attention is always last and order is fine
__snake_case : Union[str, Any] = True
__snake_case : List[Any] = True
__snake_case : Tuple = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__snake_case : List[str] = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase ) )
__snake_case : int = outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
__snake_case : Tuple = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__snake_case : List[Any] = outputs.hidden_states
__snake_case : Dict = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# YOLOS has a different seq_length
__snake_case : Tuple = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCAmelCase )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[int] = YolosModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 243 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak roberta's weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
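# Once converted, the dump folder loads like any local checkpoint (sketch; path hypothetical):
#
#   from transformers import XLMRobertaXLForMaskedLM
#   model = XLMRobertaXLForMaskedLM.from_pretrained("./converted-xlm-roberta-xl")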
| 709 |
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: leading spaces, then a growing row of stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond (the mirror image of floyd)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a full diamond of side n, or a friendly message for non-positive input."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
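    # Worked example -- pretty_print(3) draws this diamond (trailing spaces omitted):
    #
    #     *
    #    * *
    #   * * *
    #   * * *
    #    * *
    #     *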
| 361 | 0 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
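    # For comparison: OpenCV ships a native bilateral filter; note that its sigmaColor and
    # sigmaSpace are standard deviations rather than variances, so values do not transfer
    # one-to-one. A hedged one-liner:
    #
    #   reference = cv2.bilateralFilter(img, 5, 10, 10)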
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 523 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Returns train/valid DataLoaders over noisy samples of the line y = a*x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs`, returning the random values drawn after each step."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y = m*x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
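# A minimal sketch of the save/load flow the tests below exercise (single process assumed;
# the directory name is illustrative):
#
#   accelerator = Accelerator(project_dir="ckpts", project_config=ProjectConfiguration(automatic_checkpoint_naming=True))
#   model, optimizer = accelerator.prepare(DummyModel(), torch.optim.Adam(DummyModel().parameters(), lr=1e-3))
#   accelerator.save_state()                                  # writes ckpts/checkpoints/checkpoint_0
#   accelerator.load_state("ckpts/checkpoints/checkpoint_0")  # restores model/optimizer/RNG state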
class a ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: List[Any] = DummyModel()
__UpperCAmelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = dummy_dataloaders()
__UpperCAmelCase: Dict = ProjectConfiguration(total_limit=1 , project_dir=snake_case_ , automatic_checkpoint_naming=snake_case_ )
# Train baseline
__UpperCAmelCase: Union[str, Any] = Accelerator(project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Tuple = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: Optional[int] = DummyModel()
__UpperCAmelCase: List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: int = dummy_dataloaders()
# Train baseline
__UpperCAmelCase: Union[str, Any] = Accelerator()
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Dict = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
__UpperCAmelCase: int = os.path.join(snake_case_ , """initial""" )
accelerator.save_state(snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
__UpperCAmelCase: Union[str, Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): Tuple = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase: str = DummyModel()
__UpperCAmelCase: str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = dummy_dataloaders()
__UpperCAmelCase: Optional[Any] = Accelerator()
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Optional[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): Any = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
__UpperCAmelCase: Union[str, Any] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
__UpperCAmelCase: Optional[int] = os.path.join(snake_case_ , """checkpoint""" )
accelerator.save_state(snake_case_ )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case_ )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): str = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: List[Any] = DummyModel()
__UpperCAmelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Tuple = dummy_dataloaders()
__UpperCAmelCase: Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
__UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
((__UpperCAmelCase), (__UpperCAmelCase)): Optional[int] = model.a.item(), model.b.item()
__UpperCAmelCase: int = optimizer.state_dict()
__UpperCAmelCase: List[Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase: str = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase: List[str] = DummyModel()
__UpperCAmelCase: Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase, __UpperCAmelCase: Dict = dummy_dataloaders()
__UpperCAmelCase: Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case_ )
__UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) )
((__UpperCAmelCase), (__UpperCAmelCase)): Union[str, Any] = model.a.item(), model.b.item()
__UpperCAmelCase: Optional[Any] = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
__UpperCAmelCase: List[str] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__UpperCAmelCase), (__UpperCAmelCase)): str = model.a.item(), model.b.item()
__UpperCAmelCase: Tuple = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = torch.tensor([1, 2, 3] )
__UpperCAmelCase: Tuple = torch.tensor([2, 3, 4] )
__UpperCAmelCase: List[str] = DummyModel()
__UpperCAmelCase: int = torch.optim.Adam(net.parameters() )
__UpperCAmelCase: str = Accelerator()
with self.assertRaises(snake_case_ ) as ve:
accelerator.register_for_checkpointing(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase: Tuple = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: List[str] = DummyModel()
__UpperCAmelCase: int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase: Optional[int] = torch.optim.lr_scheduler.StepLR(snake_case_ , step_size=1 , gamma=0.9_9 )
__UpperCAmelCase, __UpperCAmelCase: Any = dummy_dataloaders()
__UpperCAmelCase: Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
__UpperCAmelCase: List[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
__UpperCAmelCase: Union[str, Any] = scheduler.state_dict()
train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(snake_case_ , scheduler.state_dict() )
def lowercase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase: Optional[int] = DummyModel()
__UpperCAmelCase: Tuple = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ , total_limit=2 )
# Train baseline
__UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__UpperCAmelCase: List[Any] = accelerator.prepare(snake_case_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 523 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
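# Usage sketch: a randomly chosen small configuration, runnable when this module is
# executed directly.
if __name__ == "__main__":
    config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(config.model_type, config.hidden_size, config.num_hidden_layers)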
| 701 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
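# Invocation sketch: this module backs the `accelerate test` subcommand, e.g.
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# which launches test_script.py through accelerate-launch with the chosen config.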
| 143 | 0 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
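    # Note: cell.sort(); cell.reverse(); cell.pop() pops the minimum-f cell in O(n log n)
    # per step. A binary heap does the same in O(log n) -- a sketch, not a drop-in rewrite:
    #
    #   import heapq
    #   heapq.heappush(cell, (f, g, x, y))
    #   f, g, x, y = heapq.heappop(cell)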
| 346 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
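# attribute_map in action (sketch): generic code can read config.hidden_size even though
# XGLM stores the value under d_model.
if __name__ == "__main__":
    config = XGLMConfig(d_model=512)
    print(config.hidden_size)  # 512, resolved through attribute_map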
| 346 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
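# Invocation sketch (paths and the script filename are illustrative):
#
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json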
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 713 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.12' )
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation(self ) -> float:
"""simple docstring"""
return 1E-5
@property
    def default_onnx_opset(self ) -> int:
"""simple docstring"""
return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]:
        """simple docstring"""
        setattr(processor.image_processor , """apply_ocr""" , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
 | 649 | 0 |
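# Minimal usage sketch for the ONNX config defined above. The processor
# checkpoint and the task name are assumptions (not from this file) and the
# snippet downloads the model; it requires transformers' vision extras.
from transformers import LayoutLMv3Processor
from transformers.utils import TensorType

cfg = LayoutLMv3Config()
onnx_cfg = LayoutLMv3OnnxConfig(cfg, task="question-answering")
proc = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
dummy = onnx_cfg.generate_dummy_inputs(proc, framework=TensorType.PYTORCH)
print(sorted(dummy))  # attention_mask, bbox, input_ids, pixel_values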
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
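# Example usage of capitalize() above.
print(capitalize("hello world"))  # -> Hello world
print(capitalize("123 hello"))  # a non-alphabetic first character is left as-is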
| 449 |
"""simple docstring"""
from random import randint, random
def construct_highway(number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5, ) -> list:
    """simple docstring"""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    """simple docstring"""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update(highway_now: list, probability: float, max_speed: int) -> list:
    """simple docstring"""
    number_of_cells = len(highway_now )
    # Before calculations, the next state of the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """simple docstring"""
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
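# Example run of the Nagel-Schreckenberg helpers above: five update steps on a
# 30-cell ring road with a car every 4 cells, all starting at speed 1.
example_highway = construct_highway(30, frequency=4, initial_speed=1)
history = simulate(example_highway, number_of_update=5, probability=0.3, max_speed=5)
print(len(history))  # 6 rows: the initial state plus one row per update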
| 449 | 1 |
def find_min(arr: list) -> int:
    """simple docstring"""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
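# Example: [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the smallest
# difference between the two partition sums is |12 - 11| = 1.
print(find_min([1, 6, 11, 5]))  # -> 1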
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
    def __init__(self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,apply_ocr=True ,):
        self.size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'do_resize' ) )
        self.assertTrue(hasattr(image_processing ,'size' ) )
        self.assertTrue(hasattr(image_processing ,'apply_ocr' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
    def test_batch_feature(self ):
        pass
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] ,return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) ,)
        self.assertIsInstance(encoding.words ,list )
        self.assertIsInstance(encoding.boxes ,list )
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) ,)
    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) ,)
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 1 |
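# Standalone sketch of the image processor exercised by the tests above. The
# class name follows this file's import; upstream transformers exposes it as
# LayoutLMv3ImageProcessor. apply_ocr=True additionally requires Tesseract.
from PIL import Image

processor = LayoutLMvaImageProcessor(apply_ocr=False)
sample = Image.new("RGB", (500, 300))
enc = processor(sample, return_tensors="pt")
print(enc.pixel_values.shape)  # torch.Size([1, 3, 224, 224])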
def binary_count_setbits(a: int) -> int:
    '''simple docstring'''
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(a ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
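# Quick checks for the set-bit counter above: 25 is 0b11001.
assert binary_count_setbits(25) == 3
assert binary_count_setbits(0) == 0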
| 321 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self) -> None:
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<s>")
        self.assertEqual(vocab_keys[1] , "<pad>")
        self.assertEqual(vocab_keys[-1] , "<mask>")
        self.assertEqual(len(vocab_keys) , 1_0_1_1_2_2)
    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
    def test_prepare_batch(self) -> None:
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens) , padding=True , truncation=True , return_tensors="pt")
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 6) , batch.input_ids.shape)
        self.assertEqual((2, 6) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result)
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
def _lowerCamelCase ( self) -> Optional[int]:
# fmt: off
_A : List[Any] = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_A : Any = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
| 503 | 0 |
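# Minimal sketch of the tokenizer exercised above (downloads the checkpoint;
# the expected ids are the ones asserted in test_prepare_batch).
from transformers import BarthezTokenizer

tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
ids = tok("A long paragraph for summarization.").input_ids
print(ids)  # [0, 57, 3018, 70307, 91, 2]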
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'longformer'
    def __init__(self , attention_window = 512 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 30522 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 512 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-12 , onnx_export = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    def __init__(self , config: 'PretrainedConfig' , task: str = 'default' , patching_specs: 'List[PatchingSpec]' = None ):
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )
    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
        return outputs
    @property
    def atol_for_validation(self ) -> float:
        '''simple docstring'''
        return 1E-4
    @property
    def default_onnx_opset(self ) -> int:
        '''simple docstring'''
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs(self , tokenizer: 'PreTrainedTokenizerBase' , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional['TensorType'] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
        return inputs
 | 710 |
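# Sketch: building ONNX export inputs with the classes above. The tokenizer
# checkpoint is an assumption; everything else comes from this file.
from transformers import AutoTokenizer
from transformers.utils import TensorType

cfg = LongformerConfig()
onnx_cfg = LongformerOnnxConfig(cfg)
tok = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
dummy = onnx_cfg.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
print(dummy["global_attention_mask"][0, :6])  # tensor([1, 0, 1, 0, 1, 0])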
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'lower newer'
# Testing tokenization
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing the unknown token
lowerCamelCase_ = tokens + [rust_tokenizer.unk_token]
lowerCamelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Simple input
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ = ('This is a simple input', 'This is a pair')
lowerCamelCase_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase_ = ('This is a simple input', 'This is a pair')
lowerCamelCase_ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase_ = tokenizer.pad_token_id
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=30 , return_tensors='np' )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCamelCase_ = tokenizer(*SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=60 , return_tensors='np' )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = '$$$'
lowerCamelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'This is a simple input'
lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ = tokenizer.bos_token_id
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase_ = tokenizer.decode(out_s.input_ids )
lowerCamelCase_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase_ = 'Encode this.'
lowerCamelCase_ = 'This one too please.'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = encoded_sequence_dict['input_ids']
lowerCamelCase_ = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE_ )
]
lowerCamelCase_ = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'A photo of a cat'
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('./test_opt' )
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [2, 250, 1345, 9, 10, 4758] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'A photo of a cat'
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'bos'
lowerCamelCase_ = tokenizer.get_vocab()['bos']
lowerCamelCase_ = 'A photo of a cat'
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
lowerCamelCase_ = tokenizer.encode(
SCREAMING_SNAKE_CASE_ , )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [31957, 250, 1345, 9, 10, 4758] )
| 384 | 0 |
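# Sketch of the add_prefix_space behaviour the tests above exercise, using the
# stock gpt2 checkpoint (this file's GPTaTokenizer corresponds to the upstream
# GPT2Tokenizer; the exact BPE split depends on the vocabulary).
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")
print(tok.tokenize("lower newer", add_prefix_space=True))  # e.g. ['Ġlower', 'Ġnewer']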
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =field(default=__UpperCamelCase ,metadata={"""help""": """Name of a dataset from the datasets package"""} )
__UpperCAmelCase : Any =field(
default=__UpperCamelCase ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__UpperCAmelCase : Tuple =field(
default=__UpperCamelCase ,metadata={"""help""": """A file containing the training audio paths and labels."""} )
__UpperCAmelCase : Any =field(
default=__UpperCamelCase ,metadata={"""help""": """A file containing the validation audio paths and labels."""} )
__UpperCAmelCase : Any =field(
default="""train""" ,metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to \'train\'"""
} ,)
__UpperCAmelCase : Optional[Any] =field(
default="""validation""" ,metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to \'validation\'"""
)
} ,)
__UpperCAmelCase : Union[str, Any] =field(
default="""audio""" ,metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to \'audio\'"""} ,)
__UpperCAmelCase : Tuple =field(
default="""label""" ,metadata={"""help""": """The name of the dataset column containing the labels. Defaults to \'label\'"""} )
__UpperCAmelCase : Optional[Any] =field(
default=__UpperCamelCase ,metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} ,)
__UpperCAmelCase : Any =field(
default=__UpperCamelCase ,metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} ,)
__UpperCAmelCase : Any =field(
default=2_0 ,metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} ,)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =field(
default="""facebook/wav2vec2-base""" ,metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ,)
__UpperCAmelCase : str =field(
default=__UpperCamelCase ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCAmelCase : str =field(
default=__UpperCamelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
__UpperCAmelCase : List[str] =field(
default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,)
__UpperCAmelCase : str =field(
default=__UpperCamelCase ,metadata={"""help""": """Name or path of preprocessor config."""} )
__UpperCAmelCase : List[Any] =field(
default=__UpperCamelCase ,metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
__UpperCAmelCase : List[Any] =field(
default=__UpperCamelCase ,metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
__UpperCAmelCase : Optional[Any] =field(
default=__UpperCamelCase ,metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} ,)
__UpperCAmelCase : Dict =field(
default=__UpperCamelCase ,metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__UpperCAmelCase : Union[str, Any] =field(
default=__UpperCamelCase ,metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} ,)
    def __post_init__(self ):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`." , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowerCAmelCase = raw_datasets["train"].features[data_args.label_column_name].names
__lowerCAmelCase = {}, {}
for i, label in enumerate(_A ):
__lowerCAmelCase = str(_A )
__lowerCAmelCase = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
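# A hypothetical launch of the fine-tuning script above (the script name,
# dataset and flags are assumptions following the upstream example):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --max_length_seconds 1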
| 636 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
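# A hypothetical invocation of the entry point above (paths assumed):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin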
| 584 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
__UpperCamelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
__UpperCamelCase : List[str] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: without header text a VQA processor must raise
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the fourth channel is dropped by do_convert_rgb)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
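# A quick sanity check of the flattened-patch width asserted throughout these tests:
# each patch is flattened to patch_h * patch_w * channels values, plus 2 slots that
# carry the patch's row and column index. With the 16x16 patches and 3 channels used here:
assert 16 * 16 * 3 + 2 == 770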
| 399 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 399 | 1 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
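# A compact sketch of the happy path these tests exercise (paths are hypothetical):
#
#   download_config = DownloadConfig(cache_dir="/tmp/datasets_cache", extract_compressed_file=True)
#   extracted = cached_path("/path/to/archive.txt.gz", download_config=download_config)
#   print(open(extracted).read())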
| 596 |
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Calculate the fixed monthly payment (EMI) for a loan:
        EMI = P * r * (1 + r)^n / ((1 + r)^n - 1)
    where r is the monthly interest rate and n the number of monthly payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
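    # A worked example (the annual rate compounds monthly, as the formula above assumes):
    # a 25,000 principal at 12% over 3 years costs about 830.36 per month.
    print(f"EMI: {equated_monthly_installments(25_000, 0.12, 3):.2f}")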
| 596 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
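if __name__ == "__main__":
    # A small usage sketch; the defaults above are assumed to mirror the
    # efficientformer-l1-300 checkpoint rather than values verified here.
    config = EfficientFormerConfig(hidden_sizes=[48, 96, 224, 448])
    print(config.model_type, len(config.depths), config.num_meta3d_blocks)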
| 653 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.subtract replaces the pre-1.0 tf.sub
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
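if __name__ == "__main__":
    # A deterministic usage sketch (assumes TensorFlow 1.x, where tf.Session and
    # tf.placeholder exist): cluster 50 synthetic 2-D points into 3 groups.
    sample_vectors = array([[float(i), float(i % 5)] for i in range(50)])
    found_centroids, found_assignments = TFKMeansCluster(sample_vectors, 3)
    print(found_centroids)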
| 653 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length(self) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride(self) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

            input_values = []
            for example in padded_inputs.pop("input_values"):
                if self.feature_size == 1:
                    example = example[..., None]
                input_values.append(example.T)
            padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
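if __name__ == "__main__":
    # A minimal usage sketch (24 kHz mono is assumed here; one second of silence):
    feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
    features = feature_extractor(np.zeros(24_000, dtype=np.float32), sampling_rate=24_000)
    print(len(features["input_values"]), features["input_values"][0].shape)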
| 328 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
__UpperCamelCase : List[str] = """sshleifer/student_marian_en_ro_6_1"""
__UpperCamelCase : int = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()
@require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False)
@require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True)
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False)
@require_apex
@require_torch_gpu
    def test_run_seqaseq_apex(self):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
            loss_orig, loss_bnb, f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = "\n --do_predict\n ".split()

        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
return output_dir
| 328 | 1 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 709 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
@property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
@torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
@torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
@torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
@torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
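# A usage sketch for the comparison pipeline above (assumes a GPU with enough memory
# for four Stable Diffusion v1.x checkpoints; the prompt is illustrative):
#
#   pipe = StableDiffusionComparisonPipeline.from_pretrained(pipe4_model_id)
#   output = pipe("an astronaut riding a horse", num_inference_steps=25)
#   output.images[0].save("sd_v1_1.png")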
| 256 | 0 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator = 0
    max_denominator = 1
for current_denominator in range(1 , limit + 1 ):
__lowercase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
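    # Sanity check on a tiny search space: for denominators up to 8 the largest
    # fraction below 3/7 is 2/5, so the returned numerator is 2.
    assert solution(limit=8) == 2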
| 402 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
snake_case__ : Any = """sshleifer/student_marian_en_ro_6_1"""
snake_case__ : Dict = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
if not do_eval:
return
__lowercase = [log for log in logs if "eval_loss" in log.keys()]
__lowercase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowercase = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()
@require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False)
@require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True)
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False)
@require_apex
@require_torch_gpu
    def test_run_seqaseq_apex(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
gpu_alloc_mem_diff , expected_savings , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
gpu_total_mem_diff , expected_savings , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
loss_orig , loss_bnb , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def run_trainer ( self , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3e-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , ):
'''simple docstring'''
data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
output_dir = self.get_auto_remove_tmp_dir()
args_train = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(eval_steps )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
args_eval = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(eval_steps )}
""".split()
args_predict = "\n --do_predict\n ".split()
args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
n_gpus_to_use = get_gpu_count()
master_port = get_torch_dist_unique_port()
distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd , env=self.get_env() )
else:
testargs = ["run_translation.py"] + args
with patch.object(sys , "argv" , testargs ):
main()
return output_dir
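# For reference (a sketch of what the branches above assemble): the non-distributed path
# patches sys.argv to ["run_translation.py", "--model_name_or_path", <model>, "--train_file",
# <data_dir>/train.json, ..., "--do_train", ...] and runs run_translation.main() in-process,
# while the distributed path launches the same script via `python -m torch.distributed.run`.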
| 402 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], A, module_spec=__spec__)
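# Note on the lazy-import pattern above: `from ...focalnet import FocalNetConfig` stays cheap,
# while first access to any modeling class triggers the torch-dependent submodule import.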
| 277 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], A, module_spec=__spec__)
| 277 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a["""modeling_groupvit"""] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a["""modeling_tf_groupvit"""] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _a, module_spec=__spec__)
| 19 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def register_subcommand ( parser ) -> None:
download_parser = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=str , default=None , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=str , help='Name of the model to download' )
download_parser.set_defaults(func=download_command_factory )
def __init__( self , model , cache , force , trust_remote_code ) -> None:
self._model = model
self._cache = cache
self._force = force
self._trust_remote_code = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
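# Example invocation once this command is registered with the root CLI parser (sketch):
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased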
| 697 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__(self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
self.rescale_embeddings = rescale_embeddings
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def prepare_config_and_inputs (self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common (self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
test_attn_probs = False
test_mismatched_shapes = False
def setUp (self ):
self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_from_pretrained_save_pretrained (self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_from_pretrained_with_no_automatic_init (self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_no_automatic_init (self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_hidden_states_output (self ):
super().test_hidden_states_output()
@slow
def test_model_from_pretrained (self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(model )
def test_attention_outputs (self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_jit_compilation (self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
@jax.jit
def model_jitted(input_ids , attention_mask=None , **kwargs ):
return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
with self.subTest("""JIT Enabled""" ):
jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
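# Note: the first call under jax.jit traces and compiles the function; the jax.disable_jit()
# pass above provides the eager reference that the output shapes are checked against.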
def check_pt_flax_outputs (self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 86 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]
def next_number ( number : int ) -> int:
'''simple docstring'''
sum_of_digits_squared = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
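# Worked example: next_number(44) = 4**2 + 4**2 = 32; continuing, 32 -> 13 -> 10 -> 1,
# so 44 belongs to the chain that terminates at 1.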
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True # 1 is in the chain that ends with 1
CHAINS[57] = False # 58 is in the chain that ends with 89
def chain ( number : int ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
number_chain = chain(next_number(number ) )
CHAINS[number - 1] = number_chain
while number < 10_000_000:
CHAINS[number - 1] = number_chain
number *= 10
return number_chain
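# Note on the while-loop above: multiplying by 10 appends a zero, which leaves the
# digit-square sum unchanged, so the memoized result also covers number*10, number*100, ...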
def solution ( number : int = 10_000_000 ) -> int:
'''simple docstring'''
for i in range(1 , number ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 86 | 1 |
from ..utils import DummyObject, requires_backends
class lowercase_ (metaclass=DummyObject ):
_backends = ['keras_nlp']
def __init__( self , *args , **kwargs) -> None:
requires_backends(self , ['keras_nlp'])
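# This placeholder fails only when actually used, e.g. (sketch): instantiating it raises an
# ImportError from `requires_backends` asking the user to install `keras_nlp`.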
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (PipelineTesterMixin , unittest.TestCase ):
pipeline_class = KandinskyV22PriorPipeline
params = ['prompt']
batch_params = ['prompt', 'negative_prompt']
required_optional_params = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size ( self):
return 32
@property
def time_input_dim ( self):
return 32
@property
def block_dim ( self):
return self.time_input_dim
@property
def time_embed_dim ( self):
return self.time_input_dim * 4
@property
def cross_attention_dim ( self):
return 100
@property
def dummy_tokenizer ( self):
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def dummy_text_encoder ( self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(config)
@property
def dummy_prior ( self):
torch.manual_seed(0)
model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
model = PriorTransformer(**model_kwargs)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def dummy_image_encoder ( self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
model = CLIPVisionModelWithProjection(config)
return model
@property
def dummy_image_processor ( self):
image_processor = CLIPImageProcessor(
crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical ( self):
test_max_difference = torch_device == 'cpu'
relax_max_difference = True
test_mean_pixel_difference = False
self._test_inference_batch_single_identical(
test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
@skip_mps
def test_attention_slicing_forward_pass ( self):
test_max_difference = torch_device == 'cpu'
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 20 | 1 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def UpperCAmelCase__ (pt_model , model_file ):
"""simple docstring"""
try:
with open(model_file , """rb""" ) as flax_state_f:
flax_state = from_bytes(None , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(model_file ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model (pt_model , flax_state ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda params : params.dtype == jnp.bfloat16 , flax_state ) ).values()
if any(is_type_bf16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
flax_state = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
pt_model.base_model_prefix = """"""
flax_state_dict = flatten_dict(flax_state , sep=""".""" )
pt_model_dict = pt_model.state_dict()
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
flax_key_tuple_array = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
_snake_case : Dict = """.""".join(snake_case__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key )
pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
missing_keys = list(missing_keys )
if len(snake_case__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(snake_case__ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
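# Shape conventions handled above, for reference:
# - Flax Conv kernels are (H, W, in, out) while PyTorch expects (out, in, H, W),
#   hence the (3, 2, 0, 1) transpose for 4D kernels.
# - Flax Dense kernels are (in, out) while torch.nn.Linear stores (out, in),
#   hence the plain transpose for 2D kernels.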
| 28 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations (do_eager_mode : bool , use_xla : bool ):
"""simple docstring"""
def run_func(func ):
@wraps(func )
def run_in_eager_mode(*args , **kwargs ):
return func(*args , **kwargs )
@wraps(func )
@tf.function(experimental_compile=use_xla )
def run_in_graph_mode(*args , **kwargs ):
return func(*args , **kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
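# Usage sketch: the factory above is applied as a decorator, selecting eager vs. (optionally
# XLA-compiled) graph execution from the benchmark args, e.g.
#
#   @run_with_tf_optimizations(do_eager_mode=True, use_xla=False)
#   def forward():
#       return model(input_ids, training=False)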
def random_input_ids (batch_size : int , sequence_length : int , vocab_size : int ):
"""simple docstring"""
rng = random.Random()
values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class lowercase( Benchmark ):
'''simple docstring'''
args: TensorFlowBenchmarkArguments
configs: PretrainedConfig
framework: str = "TensorFlow"
@property
def framework_version ( self ):
'''simple docstring'''
return tf.__version__
def _inference_speed ( self , model_name: str , batch_size: int , sequence_length: int ):
'''simple docstring'''
strategy = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
return self._measure_speed(_inference )
def _train_speed ( self , model_name: str , batch_size: int , sequence_length: int ):
'''simple docstring'''
strategy = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_train = self._prepare_train_func(model_name , batch_size , sequence_length )
return self._measure_speed(_train )
def _inference_memory ( self , model_name: str , batch_size: int , sequence_length: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
strategy = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
return self._measure_memory(_inference )
def _train_memory ( self , model_name: str , batch_size: int , sequence_length: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
strategy = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_train = self._prepare_train_func(model_name , batch_size , sequence_length )
return self._measure_memory(_train )
def _prepare_inference_func ( self , model_name: str , batch_size: int , sequence_length: int ):
'''simple docstring'''
config = self.config_dict[model_name]
if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
has_model_class_in_config = (
hasattr(config , """architectures""" )
and isinstance(config.architectures , list )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
model_class = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
transformers_module = __import__("""transformers""" , fromlist=[model_class] )
model_cls = getattr(transformers_module , model_class )
model = model_cls(config )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
model = TF_MODEL_MAPPING[config.__class__](config )
# encoder-decoder has vocab size saved differently
vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(input_ids , decoder_input_ids=input_ids , training=False )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(input_ids , training=False )
_inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _prepare_train_func ( self , model_name: str , batch_size: int , sequence_length: int ):
'''simple docstring'''
config = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
has_model_class_in_config = (
hasattr(config , """architectures""" )
and isinstance(config.architectures , list )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
model_class = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
transformers_module = __import__("""transformers""" , fromlist=[model_class] )
model_cls = getattr(transformers_module , model_class )
model = model_cls(config )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
# encoder-decoder has vocab size saved differently
vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
gradients = tf.gradients(loss , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
loss = model(input_ids , labels=input_ids , training=True )[0]
gradients = tf.gradients(loss , model.trainable_variables )
return gradients
_train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _measure_speed ( self , func ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(func , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
runtimes = timeit.repeat(
func , repeat=self.args.repeat , number=10 , )
return min(runtimes ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def _measure_memory ( self , func: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
trace = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
memory = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
max_bytes_in_use = meminfo.used
memory = Memory(max_bytes_in_use )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
memory = None
else:
memory_bytes = measure_peak_memory_cpu(func )
memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
if self.args.trace_memory_line_by_line:
summary = stop_memory_tracing(trace )
if memory is None:
memory = summary.total
else:
summary = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 1 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def a__ ( lowerCAmelCase ) -> int:
UpperCAmelCase__ : Any = split_dict._to_yaml_list()
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ : List[str] = SplitDict._from_yaml_list(SCREAMING_SNAKE_CASE_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
UpperCAmelCase__ : List[Any] = None
# the split name of split_dict takes over the name of the split info object
UpperCAmelCase__ : Tuple = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""" , [SplitInfo(), SplitInfo(dataset_name=SCREAMING_SNAKE_CASE_ ), SplitInfo(dataset_name="""my_dataset""" )] )
def a__ ( lowerCAmelCase ) -> Optional[Any]:
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
UpperCAmelCase__ : int = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 182 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
"""simple docstring"""
prev_sample : torch.FloatTensor
pred_original_sample : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar ( num_diffusion_timesteps ,max_beta=0.999 ,alpha_transform_type="cosine" ,) -> torch.Tensor:
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) ,max_beta ) )
return torch.tensor(betas ,dtype=torch.float32 )
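# For the cosine option above, beta_t = min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T), max_beta)
# with alpha_bar(s) = cos((s + 0.008) / 1.008 * pi / 2) ** 2, as in Nichol & Dhariwal (2021).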
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
order = 1
@register_to_config
def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas = None , clip_sample: bool = True , set_alpha_to_zero: bool = True , steps_offset: int = 0 , prediction_type: str = "epsilon" , clip_sample_range: float = 1.0 , **kwargs , ) -> None:
"""simple docstring"""
if kwargs.get("set_alpha_to_one" , lowerCamelCase ) is not None:
lowercase__ : Any = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
lowercase__ : Optional[int] = kwargs["set_alpha_to_one"]
if trained_betas is not None:
lowercase__ : Optional[int] = torch.tensor(lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase__ : int = torch.linspace(lowerCamelCase , lowerCamelCase , lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase__ : List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase__ : Optional[Any] = betas_for_alpha_bar(lowerCamelCase )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowercase__ : List[str] = 1.0 - self.betas
lowercase__ : List[str] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowercase__ : Dict = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowercase__ : Optional[int] = 1.0
# setable values
lowercase__ : int = None
lowercase__ : int = torch.from_numpy(np.arange(0 , lowerCamelCase ).copy().astype(np.intaa ) )
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> Optional[Any]:
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
f""" maximal {self.config.num_train_timesteps} timesteps.""" )
lowercase__ : Optional[Any] = num_inference_steps
lowercase__ : Dict = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase__ : Dict = (np.arange(0 , lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
lowercase__ : Any = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase )
self.timesteps += self.config.steps_offset
def step ( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = False , variance_noise = None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
# 1. get the next timestep value (inverted DDIM walks forward in time)
prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
pred_epsilon = model_output
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def __len__( self ) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
| 397 | 0 |
from __future__ import annotations
RADIX = 10
def radix_sort (list_of_ints : list[int] ) ->list[int]:
'''simple docstring'''
placement = 1
max_digit = max(list_of_ints )
while placement <= max_digit:
# declare and initialize empty buckets
buckets : list[list] = [[] for _ in range(RADIX )]
# split list_of_ints between the buckets
for i in list_of_ints:
tmp = int((i / placement) % RADIX )
buckets[tmp].append(i )
# put each buckets' contents into list_of_ints
a = 0
for b in range(RADIX ):
for i in buckets[b]:
list_of_ints[a] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
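# Example (sketch): radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) returns
# [2, 24, 45, 66, 75, 90, 170, 802] after three passes (units, tens, hundreds).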
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('''T''')
class GraphAdjacencyList ( Generic[T] ):
def __init__(self , directed = True ):
self.adj_list : dict[T, list[T]] = {} # dictionary of lists
self.directed = directed
def add_edge (self , source_vertex : T , destination_vertex : T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(source_vertex )
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = []
return self
def __repr__(self ):
return pformat(self.adj_list )
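# Usage sketch: build a small directed graph and print its adjacency list.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int]()  # directed by default
    graph.add_edge(0, 1).add_edge(1, 2)  # add_edge returns self, so calls chain
    print(graph)  # {0: [1], 1: [2], 2: []}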
| 96 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive (TaskTemplate ):
"""simple docstring"""
__UpperCAmelCase : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
__UpperCAmelCase : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
__UpperCAmelCase : ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
__UpperCAmelCase : str = "question"
__UpperCAmelCase : str = "context"
__UpperCAmelCase : str = "answers"
@property
def __snake_case ( self : Optional[Any] ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
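# Note: `column_mapping` is what `datasets` uses to rename a dataset's columns to this task's
# canonical schema; e.g. columns named "q"/"ctx"/"ans" (hypothetical) would be mapped to
# "question"/"context"/"answers" before alignment with input_schema and label_schema.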
| 81 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[str] = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE['''modeling_graphormer'''] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], __SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 452 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1_0_2_4,
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
lowercase : Union[str, Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : List[str] = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = PRETRAINED_VOCAB_FILES_MAP
__A : List[str] = ['''input_ids''', '''attention_mask''']
__A : Union[str, Any] = NllbTokenizer
__A : List[int] = []
__A : List[int] = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , lowercase=False , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else mask_token
a__ : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , legacy_behaviour=lowercase , **lowercase , )
a__ : Dict = vocab_file
a__ : str = False if not self.vocab_file else True
a__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
a__ : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(lowercase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a__ : Any = src_lang if src_lang is not None else 'eng_Latn'
a__ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
a__ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def __lowercase ( self) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowercase ( self , lowercase) -> None:
'''simple docstring'''
a__ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
a__ : Optional[Any] = src_lang
a__ : Optional[Any] = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase)
a__ : int = self.convert_tokens_to_ids(lowercase)
a__ : Optional[int] = tgt_lang_id
return inputs
def __lowercase ( self , lowercase , lowercase = "eng_Latn" , lowercase = None , lowercase = "fra_Latn" , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
a__ : List[str] = src_lang
a__ : Any = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def __lowercase ( self , lowercase) -> None:
'''simple docstring'''
a__ : str = self.convert_tokens_to_ids(lowercase)
if self.legacy_behaviour:
a__ : List[Any] = []
a__ : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
a__ : Any = [self.cur_lang_code]
a__ : Optional[Any] = [self.eos_token_id]
a__ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
a__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens)
a__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def __lowercase ( self , lowercase) -> None:
'''simple docstring'''
a__ : Union[str, Any] = self.convert_tokens_to_ids(lowercase)
if self.legacy_behaviour:
a__ : Optional[Any] = []
a__ : Dict = [self.eos_token_id, self.cur_lang_code]
else:
a__ : Dict = [self.cur_lang_code]
a__ : List[Any] = [self.eos_token_id]
a__ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens)
a__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens)
a__ : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowercase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.')
return
a__ : Optional[Any] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase):
copyfile(self.vocab_file , lowercase)
return (out_vocab_file,)
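# --- Hedged usage sketch (not part of the original file) ----------------------
# The class above mirrors transformers' fast NLLB tokenizer; the checkpoint
# name comes from the vocab maps at the top of the file, and exact token ids
# depend on the downloaded vocabulary, so treat the output as illustrative.
from transformers import AutoTokenizer

nllb_tok = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
enc = nllb_tok("Hello world", return_tensors="pt")
# set_src_lang_special_tokens places the source language code before the text
# (or after </s> when legacy_behaviour is enabled), which is exactly what the
# two TemplateProcessing blocks above implement.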
| 392 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Any = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Optional[Any] = '''time_series_transformer'''
__A : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowercase = None , lowercase = None , lowercase = "student_t" , lowercase = "nll" , lowercase = 1 , lowercase = [1, 2, 3, 4, 5, 6, 7] , lowercase = "mean" , lowercase = 0 , lowercase = 0 , lowercase = 0 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = 32 , lowercase = 32 , lowercase = 2 , lowercase = 2 , lowercase = 2 , lowercase = 2 , lowercase = True , lowercase = "gelu" , lowercase = 64 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 100 , lowercase = 0.02 , lowercase=True , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = prediction_length
a__ : str = context_length or prediction_length
a__ : List[str] = distribution_output
a__ : Any = loss
a__ : List[str] = input_size
a__ : int = num_time_features
a__ : Tuple = lags_sequence
a__ : int = scaling
a__ : Union[str, Any] = num_dynamic_real_features
a__ : List[str] = num_static_real_features
a__ : Tuple = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowercase) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ : Optional[int] = cardinality
else:
a__ : str = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowercase) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ : Tuple = embedding_dimension
else:
a__ : Optional[Any] = [min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
a__ : Tuple = input_size * len(lowercase) + self._number_of_features
a__ : Union[str, Any] = d_model
a__ : List[str] = encoder_attention_heads
a__ : List[str] = decoder_attention_heads
a__ : List[Any] = encoder_ffn_dim
a__ : Any = decoder_ffn_dim
a__ : Dict = encoder_layers
a__ : int = decoder_layers
a__ : List[Any] = dropout
a__ : str = attention_dropout
a__ : Any = activation_dropout
a__ : List[Any] = encoder_layerdrop
a__ : Any = decoder_layerdrop
a__ : int = activation_function
a__ : List[Any] = init_std
a__ : Union[str, Any] = use_cache
super().__init__(is_encoder_decoder=lowercase , **lowercase)
@property
def __lowercase ( self) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
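# --- Hedged usage sketch ------------------------------------------------------
# Instantiating the config above via its public transformers name,
# TimeSeriesTransformerConfig; the keyword names below correspond to the
# (obfuscated) attributes assigned in __init__.
from transformers import TimeSeriesTransformerConfig

ts_config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
    num_time_features=2,
)
# The encoder input width is
# feature_size = input_size * len(lags_sequence) + _number_of_features,
# so the model width follows directly from the property defined above.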
| 392 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any=BITS ) -> Any:
__snake_case = x.device
__snake_case = (x * 2_55).int().clamp(0 , 2_55 )
__snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase )
__snake_case = rearrange(_UpperCAmelCase , "d -> d 1 1" )
__snake_case = rearrange(_UpperCAmelCase , "b c h w -> b c 1 h w" )
__snake_case = ((x & mask) != 0).float()
__snake_case = rearrange(_UpperCAmelCase , "b c d h w -> b (c d) h w" )
__snake_case = bits * 2 - 1
return bits
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]=BITS ) -> List[Any]:
__snake_case = x.device
__snake_case = (x > 0).int()
__snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase , dtype=torch.intaa )
__snake_case = rearrange(_UpperCAmelCase , "d -> d 1 1" )
__snake_case = rearrange(_UpperCAmelCase , "b (c d) h w -> b c d h w" , d=8 )
__snake_case = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
return (dec / 2_55).clamp(0.0 , 1.0 )
def __UpperCAmelCase ( self : Optional[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : int=None , _UpperCAmelCase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to follow the notation below
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__snake_case = self.alphas_cumprod[timestep]
__snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__snake_case = self.bit_scale
if self.config.clip_sample:
__snake_case = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__snake_case = self._get_variance(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__snake_case = model_output.device if torch.is_tensor(_UpperCAmelCase ) else "cpu"
__snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_UpperCAmelCase ).to(_UpperCAmelCase )
__snake_case = self._get_variance(_UpperCAmelCase , _UpperCAmelCase ) ** 0.5 * eta * noise
__snake_case = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
def __UpperCAmelCase ( self : Tuple , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Dict="epsilon" , _UpperCAmelCase : Dict=None , _UpperCAmelCase : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
__snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__snake_case , __snake_case = torch.split(_UpperCAmelCase , sample.shape[1] , dim=1 )
else:
__snake_case = None
# 1. compute alphas, betas
__snake_case = self.alphas_cumprod[t]
__snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
__snake_case = 1 - alpha_prod_t
__snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__snake_case = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
__snake_case = self.bit_scale
if self.config.clip_sample:
__snake_case = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__snake_case = 0
if t > 0:
__snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_UpperCAmelCase ).to(model_output.device )
__snake_case = (self._get_variance(_UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
__snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Union[str, Any] , a_ : UNetaDConditionModel , a_ : Union[DDIMScheduler, DDPMScheduler] , a_ : Optional[float] = 1.0 , ):
"""simple docstring"""
super().__init__()
__snake_case = bit_scale
__snake_case = (
ddim_bit_scheduler_step if isinstance(a_ , a_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self : int , a_ : Optional[int] = 256 , a_ : Optional[int] = 256 , a_ : Optional[int] = 50 , a_ : Optional[torch.Generator] = None , a_ : Optional[int] = 1 , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : Dict , ):
"""simple docstring"""
__snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=a_ , )
__snake_case = decimal_to_bits(a_ ) * self.bit_scale
__snake_case = latents.to(self.device )
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__snake_case = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(a_ , a_ , a_ ).prev_sample
__snake_case = bits_to_decimal(a_ )
if output_type == "pil":
__snake_case = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
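# --- Hedged sanity check (assumes the intended helper names) ------------------
# The two helpers at the top of this file implement the community "bit
# diffusion" byte encoding; under their intended names, decimal_to_bits and
# bits_to_decimal, the encode/decode roundtrip behaves as below.
import torch

img = torch.rand(1, 3, 16, 16)      # pixel values in [0, 1]
bits = decimal_to_bits(img)         # (1, 3 * 8, 16, 16), entries in {-1, +1}
decoded = bits_to_decimal(bits)     # back to [0, 1], quantized to 1/255 steps
assert torch.allclose(decoded, (img * 255).int().float() / 255)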
| 69 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : List[str] , a_ : Tuple=3 , a_ : Any=7 , a_ : Any=True , a_ : Union[str, Any]=True , a_ : Tuple=False , a_ : Optional[int]=True , a_ : Any=99 , a_ : Dict=32 , a_ : Dict=5 , a_ : List[Any]=4 , a_ : Any=37 , a_ : Any="gelu" , a_ : List[str]=0.1 , a_ : Dict=0.1 , a_ : Optional[Any]=512 , a_ : List[Any]=16 , a_ : Any=2 , a_ : str=0.02 , a_ : Any=3 , a_ : List[Any]=4 , a_ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def A ( self : Any ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a_ , )
def A ( self : List[str] , a_ : Dict , a_ : Tuple , a_ : Optional[Any] , a_ : Dict , a_ : Dict , a_ : Dict , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = FalconModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ )
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Any , a_ : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Tuple , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = True
__snake_case = FalconModel(a_ )
model.to(a_ )
model.eval()
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
__snake_case = model(a_ , attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[int] , a_ : int , a_ : int , a_ : List[Any] , a_ : str , a_ : List[str] , a_ : str , a_ : str , a_ : Union[str, Any] , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , a_ : Optional[int] , a_ : Optional[Any] , a_ : str , a_ : Tuple , a_ : str , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , a_ : Dict , ):
"""simple docstring"""
__snake_case = True
__snake_case = True
__snake_case = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )["hidden_states"][0]
__snake_case = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )["hidden_states"][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FalconForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = FalconModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , *__snake_case = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__snake_case = alibi
self.model_tester.create_and_check_model(a_ , *a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = "single_label_classification"
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = input_dict["input_ids"]
__snake_case = FalconForCausalLM(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , use_cache=a_ )
__snake_case = input_ids.shape[0]
__snake_case = model._convert_to_rw_cache(result.past_key_values )
__snake_case = model._convert_cache_to_standard_format(a_ , a_ )
for layer in range(len(a_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = "multi_label_classification"
__snake_case = input_dict["input_ids"]
__snake_case = input_ids.ne(1 ).to(a_ )
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Dict ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a_ , "use_cache" ):
return
__snake_case = model_class(a_ ).to(a_ )
if "use_cache" not in inputs:
__snake_case = True
__snake_case = model(**a_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__snake_case = (
getattr(a_ , "decoder_layers" , a_ )
or getattr(a_ , "num_decoder_layers" , a_ )
or config.num_hidden_layers
)
__snake_case = getattr(a_ , "num_kv_heads" , config.num_attention_heads )
__snake_case = getattr(a_ , "d_model" , config.hidden_size )
__snake_case = embed_dim // num_attention_heads
__snake_case = outputs["past_key_values"]
self.assertEqual(len(a_ ) , a_ )
__snake_case , __snake_case = inputs["input_ids"].shape
for i in range(a_ ):
if config.new_decoder_architecture:
__snake_case = config.num_attention_heads
elif config.multi_query:
__snake_case = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
__snake_case = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
__snake_case = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=19 )
__snake_case = tokenizer.batch_decode(a_ )[0]
self.assertEqual(a_ , a_ )
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__snake_case = AutoTokenizer.from_pretrained(a_ )
__snake_case = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , num_beams=2 , max_new_tokens=4 )
@slow
def A ( self : Any ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__snake_case = AutoTokenizer.from_pretrained(a_ )
__snake_case = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(device=a_ )
__snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ )
# Test results are the same with and without cache
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
__snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
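# --- Hedged usage sketch ------------------------------------------------------
# The generation path the integration test above exercises, written out as
# plain user code; checkpoint name and decoding flags are taken from the test,
# the printed text depends on the downloaded weights.
from transformers import AutoTokenizer, FalconForCausalLM

falcon_tok = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
falcon = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b").eval()
prompt = falcon_tok("My favorite food is", return_tensors="pt")
out = falcon.generate(**prompt, do_sample=False, max_new_tokens=19)
print(falcon_tok.batch_decode(out)[0])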
| 69 | 1 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
A = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def __UpperCAmelCase ( __A , __A ) -> Dict:
'''simple docstring'''
inspect_dataset(__A , __A )
UpperCAmelCase__ = path + ".py"
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def __UpperCAmelCase ( __A , __A ) -> Any:
'''simple docstring'''
inspect_metric(__A , __A )
UpperCAmelCase__ = path + ".py"
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def __UpperCAmelCase ( __A , __A , __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def __UpperCAmelCase ( __A , __A , __A ) -> Dict:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def __UpperCAmelCase ( __A , __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def __UpperCAmelCase ( __A , __A , __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase__ = expected_configs[0]
assert expected_config in infos
UpperCAmelCase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def __UpperCAmelCase ( __A , __A , __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = get_dataset_infos(__A )
assert expected_config in infos
UpperCAmelCase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def __UpperCAmelCase ( __A , __A , __A ) -> Optional[Any]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
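# --- Hedged usage sketch ------------------------------------------------------
# The inspected APIs in isolation; the dataset name mirrors the parametrized
# cases above, but hub contents can change, so treat results as illustrative.
from datasets import get_dataset_config_names, get_dataset_split_names

assert "plain_text" in get_dataset_config_names("squad")
assert get_dataset_split_names("squad", config_name="plain_text") == ["train", "validation"]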
| 721 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over odd numbers; returns all primes below limit."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_0_0_0_0_0_0) -> int:
    """Longest sum of consecutive primes below `ceiling` that is itself prime."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
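# Sanity check from the problem statement: below one hundred, the longest sum
# of consecutive primes that is itself prime is 41 = 2 + 3 + 5 + 7 + 11 + 13.
assert solution(100) == 41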
if __name__ == "__main__":
print(f"{solution() = }")
| 277 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)
_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]
_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]
_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    # HashMap should expose at most the public names a plain dict has
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
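# --- Hedged usage sketch ------------------------------------------------------
# The HashMap under test supports the same item protocol as dict, which is
# exactly what the parametrized operations above exercise.
hm = HashMap(initial_block_size=4)
hm["key_a"] = "val_a"
assert hm["key_a"] == "val_a"
del hm["key_a"]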
| 143 |
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
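# Worked example: 25 is 0b011001 and 32 is 0b100000 once zero-padded to the
# same width, so their bitwise OR is 0b111001.
assert binary_or(25, 32) == "0b111001"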
| 143 | 1 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator (scalar product and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
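# --- Hedged usage sketch ------------------------------------------------------
# The library under test, used directly; values are copied from the assertions
# above rather than re-derived.
v = Vector([1, 2, 3])
w = Vector([1, 1, 1])
assert (v + w).component(2) == 4
m = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
assert m.determinant() == -5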
if __name__ == "__main__":
unittest.main()
| 707 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive int left by shift_amount."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation right, filling vacated bits with 0."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
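# Worked examples (results follow directly from the code above):
assert logical_left_shift(0b1101, 2) == "0b110100"
assert logical_right_shift(0b1101, 2) == "0b11"
assert arithmetic_right_shift(-8, 2) == "0b11110"  # sign bit is replicated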
if __name__ == "__main__":
import doctest
doctest.testmod()
| 391 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
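# --- Hedged usage sketch ------------------------------------------------------
# What the lazy module resolves at first attribute access; a tiny random config
# just to show the exported symbols wire up (weights are untrained).
from transformers import ViTMAEConfig, ViTMAEModel

vit_mae = ViTMAEModel(
    ViTMAEConfig(hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=2, intermediate_size=37)
)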
| 97 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_00_00_00) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
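# Known result for the 2,000,000 target (stated, not re-derived here): a 36 x 77
# grid contains 1,999,998 rectangles, so solution() should return 2772.
assert solution() == 2772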
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCamelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
__UpperCAmelCase = None
class lowerCamelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__UpperCAmelCase = PandasConfig
def A ( self ) -> List[str]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A ( self , snake_case_ ) -> Any:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
__lowercase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case_ , (str, list, tuple) ):
__lowercase = data_files
if isinstance(snake_case_ , snake_case_ ):
__lowercase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase = [dl_manager.iter_files(snake_case_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__lowercase = []
for split_name, files in data_files.items():
if isinstance(snake_case_ , snake_case_ ):
__lowercase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase = [dl_manager.iter_files(snake_case_ ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case_ , gen_kwargs={'''files''': files} ) )
return splits
def A ( self , snake_case_ ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowercase = table_cast(snake_case_ , self.config.features.arrow_schema )
return pa_table
def A ( self , snake_case_ ) -> int:
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(snake_case_ ) ):
with open(snake_case_ , '''rb''' ) as f:
__lowercase = pa.Table.from_pandas(pd.read_pickle(snake_case_ ) )
yield i, self._cast_table(snake_case_ )
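# --- Hedged usage sketch ------------------------------------------------------
# The builder above backs the packaged "pandas" loader, which reads pickled
# DataFrames; the file name here is illustrative.
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
assert ds[0] == {"text": "a", "label": 0}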
| 710 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Any = logging.get_logger(__name__)
a : int = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "efficientformer"
def __init__( self , snake_case_ = [3, 2, 6, 4] , snake_case_ = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case_ = [True, True, True, True] , snake_case_ = 4_4_8 , snake_case_ = 3_2 , snake_case_ = 4 , snake_case_ = 7 , snake_case_ = 5 , snake_case_ = 8 , snake_case_ = 4 , snake_case_ = 0.0 , snake_case_ = 1_6 , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = 2 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1 , snake_case_ = True , snake_case_ = True , snake_case_ = 1e-5 , snake_case_ = "gelu" , snake_case_ = 0.0_2 , snake_case_ = 1e-1_2 , snake_case_ = 2_2_4 , snake_case_ = 1e-0_5 , **snake_case_ , ) -> None:
'''simple docstring'''
super().__init__(**snake_case_ )
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = hidden_sizes
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = patch_size
__lowercase = num_channels
__lowercase = depths
__lowercase = mlp_expansion_ratio
__lowercase = downsamples
__lowercase = dim
__lowercase = key_dim
__lowercase = attention_ratio
__lowercase = resolution
__lowercase = pool_size
__lowercase = downsample_patch_size
__lowercase = downsample_stride
__lowercase = downsample_pad
__lowercase = drop_path_rate
__lowercase = num_metaad_blocks
__lowercase = distillation
__lowercase = use_layer_scale
__lowercase = layer_scale_init_value
__lowercase = image_size
__lowercase = batch_norm_eps
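# --- Hedged usage sketch ------------------------------------------------------
# The de-obfuscated class is transformers' EfficientFormerConfig; the keyword
# names below match the attributes assigned in __init__ above.
from transformers import EfficientFormerConfig

eff_config = EfficientFormerConfig(
    depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448]
)
assert eff_config.image_size == 224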
| 527 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
class lowercase_ ( snake_case_ ):
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__=1_2_5 , UpperCamelCase__=None , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase_ = len(set(filter(lambda UpperCamelCase__ : bool("extra_id" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
UpperCAmelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
UpperCAmelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
UpperCAmelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCAmelCase_ = extra_ids
UpperCAmelCase_ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase_ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase_ = len(self.special_tokens_encoder )
UpperCAmelCase_ = len(UpperCamelCase__ )
for i, token in enumerate(UpperCamelCase__ ):
UpperCAmelCase_ = self.vocab_size + i - n
UpperCAmelCase_ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase__ )) + [1]
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
if len(UpperCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = self._add_eos_if_not_present(UpperCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase_ = self._add_eos_if_not_present(UpperCamelCase__ )
return token_ids_a + token_ids_a
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [chr(UpperCamelCase__ ) for i in text.encode("utf-8" )]
return tokens
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
if token in self.special_tokens_encoder:
UpperCAmelCase_ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase_ = self.added_tokens_encoder[token]
elif len(UpperCamelCase__ ) != 1:
UpperCAmelCase_ = self.unk_token_id
else:
UpperCAmelCase_ = ord(UpperCamelCase__ ) + self._num_special_tokens
return token_id
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
if index in self.special_tokens_decoder:
UpperCAmelCase_ = self.special_tokens_decoder[index]
else:
UpperCAmelCase_ = chr(index - self._num_special_tokens )
return token
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8" )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode("utf-8" , errors="ignore" )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
return ()
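# The class above maps every UTF-8 byte to its own token id, offset past the special
# tokens. A minimal standalone sketch of that round-trip (illustrative names only,
# independent of the tokenizer class; assumes 3 special tokens, as defined above):
def _demo_byte_roundtrip(text, num_special_tokens=3):
    # encode: one id per UTF-8 byte, shifted past the special-token ids
    ids = [b + num_special_tokens for b in text.encode("utf-8")]
    # decode: shift back and reassemble the byte string
    return bytes(i - num_special_tokens for i in ids).decode("utf-8", errors="ignore")

assert _demo_byte_roundtrip("héllo") == "héllo"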
| 660 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
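# Appendix (not part of the original file): a minimal sketch of the lazy-import
# pattern that _LazyModule implements -- submodules are only imported when one of
# their symbols is first accessed. Names here are illustrative, not the
# transformers API.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # the owning submodule is imported on first attribute access,
        # then the symbol is resolved from it
        module = importlib.import_module(f".{self._symbol_to_module[symbol]}", self.__name__)
        return getattr(module, symbol)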
| 417 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
@property
    def dummy_vq_model( self ):
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    def test_inference_uncond( self ):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_uncond( self ):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
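# A hedged usage sketch of the pipeline exercised by the integration test above
# (assumes network access to download "CompVis/ldm-celebahq-256"; a GPU is
# recommended but not required):
if __name__ == "__main__":
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    sample = pipe(generator=torch.manual_seed(0), num_inference_steps=50).images[0]
    sample.save("ldm_sample.png")  # default output_type is "pil"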
| 718 | """simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md , expected_dict ):
    """simple docstring"""
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md , expected_error ):
    """simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md , expected_error ):
    """simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md ):
    """simple docstring"""
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md , expected_dict ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md , expected_error ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md , expected_error ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
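# A minimal usage sketch of the ReadMe validator exercised by the tests above
# (illustrative; validate() raises ValueError describing each problem it finds):
if __name__ == "__main__":
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.validate()  # a correct card validates silently
    try:
        ReadMe.from_string(README_NO_YAML, example_yaml_structure).validate()
    except ValueError as err:
        print(err)  # reports the missing YAML markers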
| 536 | 0 |