| code | code_codestyle | style_context | style_context_codestyle | label |
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
|---|---|---|---|---|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional input projections, as used in Transformer-XL."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden :: [..., seq_len, d_proj]; labels :: [..., seq_len]. Returns per-token
        negative log-likelihoods when labels are given, else log-probabilities.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
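# A minimal smoke test of the module above; vocabulary size, cutoffs, and
# tensor shapes are illustrative only, not taken from any real checkpoint.
if __name__ == "__main__":
    adaptive_softmax = ProjectedAdaptiveLogSoftmax(
        n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=1
    )
    hidden = torch.randn(4, 8, 64)  # [bsz, seq_len, d_proj]
    labels = torch.randint(0, 1000, (4, 8))  # [bsz, seq_len]
    nll = adaptive_softmax(hidden, labels)  # per-token negative log-likelihoods
    print(nll.shape)  # torch.Size([28]): the label shift drops one position per row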
| 30 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
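# Example invocation; the paths below are illustrative placeholders, not real
# checkpoints:
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model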
| 94 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
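# As the header above notes, this script is meant to be started through the
# `accelerate` CLI, which reads the hardware setup from `accelerate config`;
# the file name below is a placeholder for wherever this script is saved:
#   accelerate launch nlp_example.py --mixed_precision fp16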
| 354 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of GIT models."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for GIT (GenerativeImage2Text) models."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
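# A minimal sanity check of the two config classes above, using defaults only:
if __name__ == "__main__":
    config = GitConfig()
    print(config.vision_config.hidden_size)  # 768 by default
    print(config.to_dict()["model_type"])  # "git"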
| 71 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series transformer."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 74 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
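# Worked check from the Project Euler problem statement: the four adjacent
# digits of N with the greatest product are 9, 9, 8, 9, whose product is 5832.
# `solution_window` below is an illustrative helper, not part of the solution
# above; it is the same sliding window, parameterized by span:
def solution_window(n: str = N, span: int = 4) -> int:
    best = 0
    for i in range(len(n) - span + 1):
        product = 1
        for digit in n[i : i + span]:
            product *= int(digit)
        best = max(best, product)
    return best  # solution_window(span=4) == 5832; solution_window(span=13) matches solution()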
| 327 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def a__ ( ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = 10
UpperCAmelCase_ : List[str] = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(_SCREAMING_SNAKE_CASE ) ),
} , features=_SCREAMING_SNAKE_CASE , )
return dataset
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=_SCREAMING_SNAKE_CASE )
return filename
# FILE_CONTENT + files
_lowerCamelCase = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data" ) / "file.txt"
UpperCAmelCase_ : List[str] = FILE_CONTENT
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
    import bz2
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
UpperCAmelCase_ : str = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
    with bz2.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
import gzip
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
UpperCAmelCase_ : Dict = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
with gzip.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
UpperCAmelCase_ : Tuple = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
        with lz4.frame.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(_SCREAMING_SNAKE_CASE , "w" ) as archive:
archive.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
"""simple docstring"""
import tarfile
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
import lzma
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
UpperCAmelCase_ : List[Any] = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
with lzma.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
import zipfile
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
UpperCAmelCase_ : List[str] = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
with zstd.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = tmp_path_factory.mktemp("data" ) / "file.xml"
UpperCAmelCase_ : Optional[Any] = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session" )
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = datasets.Dataset.from_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(_SCREAMING_SNAKE_CASE ) ) as con:
UpperCAmelCase_ : Any = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(_SCREAMING_SNAKE_CASE , "w" , newline="" ) as f:
UpperCAmelCase_ : int = csv.DictWriter(_SCREAMING_SNAKE_CASE , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(_SCREAMING_SNAKE_CASE , "w" , newline="" ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(_SCREAMING_SNAKE_CASE , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] ) -> Tuple:
"""simple docstring"""
    import bz2
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(_SCREAMING_SNAKE_CASE , "rb" ) as f:
UpperCAmelCase_ : List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
UpperCAmelCase_ : List[Any] = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
UpperCAmelCase_ : Dict = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=_SCREAMING_SNAKE_CASE )
writer.write_table(_SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
UpperCAmelCase_ : Optional[Any] = {"data": DATA}
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
UpperCAmelCase_ : List[Any] = {"data": DATA_DICT_OF_LISTS}
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
import gzip
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(_SCREAMING_SNAKE_CASE , "rb" ) as orig_file:
with gzip.open(_SCREAMING_SNAKE_CASE , "wb" ) as zipped_file:
zipped_file.writelines(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
import gzip
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(_SCREAMING_SNAKE_CASE , "rb" ) as orig_file:
with gzip.open(_SCREAMING_SNAKE_CASE , "wb" ) as zipped_file:
zipped_file.writelines(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("nested" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.join("nested" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = ["0", "1", "2", "3"]
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ["0", "1", "2", "3"]
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["0", "1", "2", "3"]
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : int = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("main_dir" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename("unsupported.ext" ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="session" )
def a__ ( ) -> List[str]:
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def a__ ( ) -> Dict:
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : str = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
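# How a test consumes one of these session-scoped fixtures: pytest injects the
# fixture's return value by parameter name. Illustrative sketch only (the
# fixture name `text_path` is hypothetical, standing in for any of the
# path-returning fixtures above):
#
# def test_reads_text_file(text_path):
#     with open(text_path, encoding="utf-8") as f:
#         assert f.read() == FILE_CONTENT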
| 67 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterms that differ in exactly one position, replacing the
    differing bit with "_"; return False when they differ in more than one."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly combine terms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Render each minterm as a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True when the implicant string1 covers the minterm string2, i.e. the two
    strings differ in exactly `count` positions (the "_" wildcards)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily
    cover the remaining minterms."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 when implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
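# Illustrative run: decimal_to_binary(3, [1.0, 3.0]) renders each float
# minterm bit by bit, giving ["0.00.01.0", "0.01.01.0"]; check() then keeps
# the terms that cannot be merged, prime_implicant_chart() marks which
# implicant covers which minterm, and selection() extracts the essential ones.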
| 67 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class A_ ( snake_case__ ):
_lowercase : Union[str, Any] = 'speech_to_text_2'
_lowercase : List[Any] = ['past_key_values']
_lowercase : Any = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
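
# Minimal usage sketch (added for illustration; not part of the original
# module). `attribute_map` lets generic code read `hidden_size` and
# `num_attention_heads` even though this config stores them as `d_model`
# and `decoder_attention_heads`:
#
#   cfg = Speech2Text2Config(d_model=64, decoder_attention_heads=2)
#   assert cfg.hidden_size == 64
#   assert cfg.num_attention_heads == 2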
| 322 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: the largest square ending at a cell is
    1 + min(right, diagonal, down) when the cell contains a 1."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over an (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and the next row."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy the finished row; a plain assignment would alias the two lists
        # and corrupt the diagonal/bottom reads on the next iteration
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
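    # Cross-check (added for illustration; not in the original file): all four
    # strategies must agree on the same matrix, whose largest square of 1s is 2x2.
    _mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down_approach(3, 3, _mat)
        == largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, _mat)
        == largest_square_area_in_matrix_bottom_up(3, 3, _mat)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _mat)
        == 2
    )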
| 322 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)
    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
 | 350 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
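
# Shape sanity check (added for illustration; not part of the original script).
# A version-2.0 QKV tensor stored as [num_heads * num_splits * hidden, cols]
# keeps its outer shape, but the Q/K/V splits become the leading blocks.
_p = torch.arange(2 * 3 * 4 * 5).reshape(2 * 3 * 4, 5).float()
_fixed = fix_query_key_value_ordering(_p, 2.0, 3, 2, 4)
assert _fixed.shape == _p.shape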
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
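
# Example of the resulting key mapping (added for illustration): a Megatron
# entry such as "layers.0.mlp.dense_h_to_4h.weight" is stored above as
# "transformer.h.0.mlp.c_fc.weight", transposed into the Conv1D layout GPT-2 uses.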
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
 | 96 | 0 |
def z_function(input_str: str) -> list[int]:
    """For each index i > 0, z_result[i] is the length of the longest
    substring starting at i that matches a prefix of input_str."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
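    # Illustrative checks (added; not in the original file):
    assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    assert find_pattern("abr", "abracadabra") == 2  # matches at indices 0 and 7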
| 280 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
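
# Illustration (added; not in the original file): with the default
# scale_factor=8, pixel dimensions are rounded up to a multiple of 8**2 = 64
# and mapped to latent-space sizes, e.g. downscale_height_and_width(768, 768)
# returns (96, 96).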
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for the Kandinsky 2.2 decoder."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 280 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }

        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class ControlNetImg2ImgMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }

        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 51 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
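
# Illustration (added; not in the original script): for layer 0, the list
# built above contains pairs such as
#   ("blocks.0.attn.proj.weight", "beit.encoder.layer.0.attention.output.dense.weight"),
# i.e. timm-style names on the left and HF BEiT names on the right.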
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
# queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__UpperCamelCase : Dict = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Map a 1-D array of timesteps to sinusoidal features of width embedding_dim."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    r"""Time step embedding MLP: two Dense layers with a silu in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    r"""Wrapper around the sinusoidal time step embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
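
# Minimal usage sketch (added for illustration; not part of the original
# module). Embed four integer timesteps into 32-dim sinusoidal features and
# project them through the MLP:
#
#   import jax
#   timesteps = jnp.array([0, 10, 100, 1000])
#   temb = FlaxTimesteps(dim=32).apply({}, timesteps)   # shape (4, 32), no params
#   mlp = FlaxTimestepEmbedding(time_embed_dim=32)
#   params = mlp.init(jax.random.PRNGKey(0), temb)
#   out = mlp.apply(params, temb)                       # shape (4, 32)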
| 146 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which wastes context budget after
    # truncation, so we strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
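# Worked example for the stride logic above (illustrative numbers): with max_seq_length=384
# and doc_stride=128, a long context is split into windows that start 256 tokens apart, so
# consecutive features share 128 overlapping context tokens and all map back to one example id.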
__UpperCamelCase : str = raw_datasets["validation"]
# Validation Feature Creation
__UpperCamelCase : Optional[int] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__UpperCamelCase : Union[str, Any] = default_data_collator
__UpperCamelCase : List[str] = eval_dataset.remove_columns(["example_id", "offset_mapping"])
__UpperCamelCase : Optional[int] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
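# Illustrative shapes for the formatting above: predictions arrive as {example_id: answer_text}
# and become [{"id": ..., "prediction_text": ...}], while references pair each id with its gold
# answers; both match the input schema expected by the squad/squad_v2 metric.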
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
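    # Illustrative size check for the helper above: a binding of shape (8, 128) with an
    # int32 dtype occupies trt.volume((8, 128)) * 4 = 4096 bytes.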
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (bindings 3 and 4 hold the start and end logits).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 146 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
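# Minimal timing sketch (assumption: run after the optimized pipeline above is built):
# import time
# t0 = time.time()
# _ = pipe(prompt, **generate_kwargs).images[0]
# print(f"bf16 + ipex generation took {time.time() - t0:.1f}s")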
| 366 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for semantic Stable Diffusion pipelines.

    Args:
        images: list of denoised PIL images, or a numpy array of shape (batch, height, width, channels).
        nsfw_content_detected: per-image flags set when potentially unsafe content was detected,
            or None if the safety check was skipped.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
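# Usage sketch (illustrative): pipelines populate this dataclass, so callers typically do
#     out = pipe(prompt)
#     image = out.images[0]                 # first denoised image
#     flagged = out.nsfw_content_detected   # None when the safety checker was skipped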
| 217 | 0 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
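# Illustrative call: get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8) returns a
# (4, 8) array whose first four columns are sin terms and last four are cos terms
# (the halves are swapped when flip_sin_to_cos=True).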
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 181 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
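    # The update rule is competitive learning: w_j <- w_j + alpha * (x - w_j), i.e. only
    # the winning cluster's weight vector is pulled toward the training sample.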
def main() -> None:
    # Training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 181 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_lock_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
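# Note: the 255-character bound asserted above matches the usual filesystem limit on a
# single filename, which is why FileLock shortens over-long lock-file basenames.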
| 369 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
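    # Illustrative check of the helper above: a 30x400 (h x w) image with shortest_edge=18
    # keeps height 18 and scales the width to int(18 * 400 / 30) = 240.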
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 62 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
while b:
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = b, a % b
return a
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return a if b == 0 else euclidean_gcd_recursive(A_ , a % b )
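# Worked example: euclidean_gcd(48, 18) iterates (48, 18) -> (18, 12) -> (12, 6) -> (6, 0)
# and returns 6; euclidean_gcd_recursive follows the same chain of remainders.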
def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
| 106 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 83 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"], preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
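    # e.g. with size={"shortest_edge": 224}, resize() maps a 480x640 image to roughly
    # 224x298: the short side is pinned to 224 and the long side keeps its aspect ratio.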
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
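# Usage sketch (illustrative): with the defaults above,
#     processor = CLIPImageProcessor()
#     batch = processor(images=[pil_image], return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224) after resize + center crop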
| 371 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 347 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
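# e.g. to_2tuple(224) -> (224, 224), while to_2tuple((224, 196)) passes through unchanged;
# the tests below use it to normalize image_size/patch_size before counting patches.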
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
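    # For instance, a tiny ViT with image_size=30 and patch_size=2 yields 15 * 15 = 225
    # patches, so the attention maps checked above have seq_len 226 once [CLS] is added.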
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(A__, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
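
# --- illustrative note (added; not part of the original test file) -----------
# The attention-shape checks above rely on ViT-style patch arithmetic: an
# H x W image split into P x P patches yields (H // P) * (W // P) patch tokens,
# and DeiT prepends [CLS] plus a distillation token. For example, a 30x30
# image with 2x2 patches gives 225 patches, so seq_len is 227:
assert (30 // 2) * (30 // 2) + 2 == 227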
| 338 |
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (`target`) can be constructed from
    the given list of substrings (`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
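
# --- illustrative check (added): a minimal sketch of how the table behaves ---
# Every returned combination concatenates back to the target; for instance,
# all_construct("aa", ["a", "aa"]) yields [["aa"], ["a", "a"]].
def _example_all_construct() -> None:
    combinations = all_construct("aa", ["a", "aa"])
    assert all("".join(way) == "aa" for way in combinations)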
if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 314 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase : int = logging.getLogger(__name__)
UpperCAmelCase : List[Any] = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase : List[str] = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
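
# --- example invocation (added; the script file name is illustrative) ---------
# python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#     --num_beams 4 --max_length 5 --output_file_path bart.onnx --device cpu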
| 350 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 313 | 0 |
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
        " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
        " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
    ]
    expected_text = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
        " reduce the risk of wildfires.",
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 222 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
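
# --- usage note (added) -------------------------------------------------------
# requests.post blocks until Slack answers; in practice you would usually pass
# a timeout so a hung webhook cannot stall the caller, e.g.:
#   requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=10)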
| 222 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Optional[int] = ['''torch''', '''scipy''']
def __init__( self: Union[str, Any] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def lowerCamelCase_ ( cls: Any , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: int ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
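
# --- illustrative note (added) -------------------------------------------------
# With the scipy backend missing, touching the placeholder raises immediately
# through requires_backends: instantiating the class above gives an ImportError
# telling the user to install the missing backend.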
| 363 |
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """
        Update an element in log(N) time.
        """
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """
        Get range query value in log(N) time.
        """
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()
        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
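
# --- usage sketch (added; assumes the SegmentTree class above) -----------------
# Any associative reducer works as `function`; a max-tree over the same data
# answers range-max queries in O(log n):
#   max_tree = SegmentTree([2, 1, 5, 3, 4], max)
#   assert max_tree.query_range(0, 4) == 5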
| 93 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Union[str, Any] = {"""vocab_file""": """vocab.json"""}
SCREAMING_SNAKE_CASE__:List[str] = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
SCREAMING_SNAKE_CASE__:Optional[Any] = {"""mgp-str""": 27}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowerCamelCase , lowerCamelCase="[GO]" , lowerCamelCase="[GO]" , lowerCamelCase="[s]" , lowerCamelCase="[GO]" , **lowerCamelCase ):
super().__init__(
unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__a = json.load(lowerCamelCase )
__a = {v: k for k, v in self.vocab.items()}
@property
def a__ ( self ):
return len(self.vocab )
def a__ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def a__ ( self , lowerCamelCase ):
__a = []
for s in text:
char_tokens.extend(lowerCamelCase )
return char_tokens
def a__ ( self , lowerCamelCase ):
return self.vocab.get(lowerCamelCase , self.vocab.get(self.unk_token ) )
def a__ ( self , lowerCamelCase ):
return self.decoder.get(lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
if not os.path.isdir(lowerCamelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(lowerCamelCase ) )
return
__a = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
return (vocab_file,)
| 261 |
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
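
# --- illustrative note (added) --------------------------------------------------
# Each operation tuple pairs a dict-protocol function with its arguments, so
# _run_operation({}, *_set("k", "v")) returns (None, None) and leaves {"k": "v"},
# while _run_operation({}, *_get("k")) captures the KeyError in the second slot.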
| 261 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
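
# --- illustrative note (added) ---------------------------------------------------
# _LazyModule defers the heavy torch/speech imports until an attribute is first
# accessed: importing this package stays cheap, and resolving e.g. `ASTModel`
# is what actually triggers loading modeling_audio_spectrogram_transformer.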
| 359 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 88 | 0 |
import math


def perfect_square(num: int) -> bool:
    """
    Check if a number is perfect square number or not.
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is perfect square using binary search.
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
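
# --- illustrative checks (added) ------------------------------------------------
# Both predicates agree on small inputs; the sqrt-based version depends on float
# precision for huge integers, while the binary search stays exact.
assert perfect_square(9) and perfect_square_binary_search(9)
assert not perfect_square(10) and not perfect_square_binary_search(10)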
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 190 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 190 | 1 |
def lowerCamelCase__ ( ):
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__a = generate_large_matrix()
__a = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
assert all(row == sorted(a__ , reverse=a__ ) for row in grid )
assert all(list(a__ ) == sorted(a__ , reverse=a__ ) for col in zip(*a__ ) )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Dict = len(a__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase_ : List[Any] = (left + right) // 2
UpperCAmelCase_ : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase_ : int = mid + 1
else:
UpperCAmelCase_ : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(a__ )
def count_negatives_binary_search(grid):
    '''simple docstring'''
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid):
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid):
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
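
# Sanity-check sketch (added): the three counters agree on the test grids above.
# for g in test_grids:
#     assert count_negatives_binary_search(g) == count_negatives_brute_force(g)
#     assert count_negatives_binary_search(g) == count_negatives_brute_force_with_break(g)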
def benchmark():
    '''simple docstring'''
    from timeit import timeit

    print('''Running benchmarks''')
    setup = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'''{func}(grid=grid)''', setup=setup, number=500)
        print(f'''{func}() took {time:0.4f} seconds''')
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
from collections import defaultdict
def check_anagrams(first_str, second_str):
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character, increment the count for the first string
    # and decrement it for the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
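
# Examples (added for illustration):
# check_anagrams("Silent", "Listen")  # -> True
# check_anagrams("mass", "mess")      # -> False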
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
    print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((2_5_6, 2_5_6)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    """simple docstring"""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        '''simple docstring'''
        if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should be in [0.0, 1.0] but is {strength}""")
    def get_timesteps(self, num_inference_steps, strength, device):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        '''simple docstring'''
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""")
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print('add noise to latents at timestep', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        '''simple docstring'''
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator, ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
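
# Hedged usage sketch (added; the model id is illustrative, any DDPM/DDIM UNet works):
# from diffusers import UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=DDIMScheduler())
# result = pipe(image=init_image, strength=0.6, num_inference_steps=50)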
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda v: version.Version(v))
def init_hf_modules():
    """simple docstring"""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file: str):
    """simple docstring"""
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    """simple docstring"""
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f"""{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`""")
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""")
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'])}.""")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"""{module_needed}.py""", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
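
# Hedged usage sketch (added; the pipeline name is illustrative):
# cls = get_class_from_dynamic_module("lpw_stable_diffusion", "lpw_stable_diffusion.py")
# fetches the community pipeline file matching the installed diffusers version
# and returns the single DiffusionPipeline subclass defined in it.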
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray):
    """simple docstring"""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray):
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray):
    """simple docstring"""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
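
# Illustrative example (added): nearest neighbour for one query vector.
# dataset = np.array([[0, 0], [1, 1], [2, 2]])
# similarity_search(dataset, np.array([[0.9, 1.0]]))  # -> [[[1, 1], ~0.1]]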
if __name__ == "__main__":
import doctest
    doctest.testmod()
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
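
    # Usage sketch (added; the script name is illustrative):
    #   python crawl_google_results.py neural networks
    # opens the first Google result for "neural networks" in the default browser.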
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""

    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """simple docstring"""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """simple docstring"""
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(iterable)}, """
            F"""length: {sum(len(i[1]) for i in split_kwds)}""")

    logger.info(
        F"""Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}""")
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(F"""Finished {num_proc} processes""")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(mapped)} objects""")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """simple docstring"""
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """simple docstring"""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
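
# Hedged usage sketch (added; `fn` and `my_single_map_nested` are hypothetical callables):
# with parallel_backend("spark"):
#     mapped = parallel_map(fn, items, 2, None, True, None, my_single_map_nested)
# inside the context, the joblib/spark backend is used instead of multiprocessing.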
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''apply_ocr'''))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})

    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='''pt''')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        """simple docstring"""
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''')

        image = Image.open(ds[0]['''file''']).convert('''RGB''')

        encoding = image_processing(image, return_tensors='''pt''')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
A_ : Any = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
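        expected_words = A_  # (added) capture the word list above before `A_` is reused for the boxes below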
A_ : Optional[int] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
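        expected_boxes = A_  # (added) capture the box list assigned above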
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors='''pt''')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """spm_file""": """sentencepiece.bpe.model""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
        ),
    },
    """spm_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    """facebook/s2t-small-librispeech-asr""": 1024,
}

MUSTC_LANGS = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]

LANGUAGES = {"""mustc""": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, '''r''') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, '''w''') as f:
        json.dump(data, f, indent=2)
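
# Hedged usage sketch (added; the checkpoint name comes from the maps above):
# tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
# ids = tok("hello world").input_ids
# tok.decode(ids)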
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_SCREAMING_SNAKE_CASE : int = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    '''simple docstring'''
    require_version(deps[pkg], hint)
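
# Example (added for illustration):
# dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned range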
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["CLIPFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Dict = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 359 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
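# enable_full_determinism() switches torch to deterministic algorithms so the
# pixel-slice assertions below are reproducible across runs.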
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
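    # The components above are deliberately tiny and randomly initialised so the
    # full text-guided image-to-image pipeline can run on CPU in a unit test.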
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 13 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that marks a function as experimental API."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
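# Example usage (illustrative):
#
#   @experimental
#   def new_feature():
#       ...
#
# Each call to new_feature() first emits a UserWarning; functools.wraps keeps
# the wrapped function's name and docstring intact.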
| 104 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from (0, 0) to (size - 1, size - 1) and print it."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: returns True if (i, j) can reach the exit."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark (i, j) as part of the tentative path
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
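# Example: a 0 marks a free cell and a 1 a blocked cell, so for
#   maze = [[0, 1], [0, 0]]
# the solver prints the visited path ([1, 0] / [1, 1]) and returns True.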
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 369 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """Scale images so the shorter edge matches a sampled target length."""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
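# Example: with short_edge_length=(800, 800) a 480x640 uint8 image is scaled so
# its shorter side becomes 800 (yielding 800x1067), then capped at `max_size`.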
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 98 | 0 |
def selection_sort(collection):
    """In-place selection sort: swap the minimum of the unsorted suffix into place."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
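# Example: selection_sort([64, 25, 12, 22, 11]) -> [11, 12, 22, 25, 64].
# The sort is O(n^2): each pass selects the minimum of the unsorted suffix.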
if __name__ == "__main__":
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : str = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
| 88 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 | 0 |
"""simple docstring"""
def selection_sort(collection: list) -> list:
    '''In-place selection sort: swap the minimum of the unsorted suffix into place.'''
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
__A = input('''Enter numbers separated by a comma:\n''').strip()
__A = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
 | 64 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
 | 64 | 1 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_UpperCamelCase: Tuple = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
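# Each entry renames a key prefix from the original SAM checkpoint to the
# corresponding Hugging Face module path; replace_keys() applies them below.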
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
_UpperCamelCase: Tuple = argparse.ArgumentParser()
_UpperCamelCase: str = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
_UpperCamelCase: Optional[int] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
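# Typical invocation (script filename illustrative, not taken from the source):
#   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 --pytorch_dump_folder_path ./sam-vit-huge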
| 255 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 255 | 1 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
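    # Each dummy placeholder class records the backends it needs in `_backends`;
    # the test below checks that every such backend is pinned in the deps table.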
    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 361 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:

    class Image:
        # Minimal placeholder so this module still imports when PIL is missing.
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
    def test_small_model_tf(self):
pass
    @slow
    @require_torch
    def test_mask_generation_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_mask_generation_threshold_pt(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
] , )
| 178 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
UpperCAmelCase__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 1_2790,
"token_str": " Lyon",
},
] , )
UpperCAmelCase__ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # a string without a mask token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid targets
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # predictions than the number of unique targets
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
    def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
@slow
    def test_cross_validation(self):
        testargs = "\n    examples/by_feature/cross_validation.py\n    --num_folds 2\n    ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)
    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []  # name assumed from the upstream conversion script
IGNORE_KEYS = []  # name assumed from the upstream conversion script
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
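# Illustrative behaviour of `should_ignore` (editorial examples, not in the
# original script):
#   should_ignore("encoder.model.0.conv.conv", ["encoder.model.*"])            -> True   (prefix wildcard)
#   should_ignore("decoder.model.1.lstm", ["encoder.*"])                       -> False  (different prefix)
#   should_ignore("quantizer.vq.layers.0._codebook.embed",
#                 ["vq.layers.*._codebook"])                                   -> True   (prefix + suffix match)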
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    # NOTE: the original line read `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy; a membership test is what was intended.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    # config attribute names below follow `transformers.EncodecConfig`
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
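    # Example invocation (illustrative only; the script filename and paths are
    # placeholders to adapt to your checkout and checkpoint location):
    #
    #   python convert_encodec_checkpoint_to_pytorch.py \
    #       --model encodec_24khz \
    #       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
    #       --pytorch_dump_folder_path ./encodec-24khz-hf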
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given ``nums`` sorted in ascending order, return the indices of the two
    numbers that add up to ``target``, or ``[]`` if no such pair exists.
    The two-pointer technique relies on the input being sorted.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 100)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
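
# Illustrative alternative (not in the original file): when `nums` is *not*
# sorted, a single-pass hash map finds the pair in O(n) time and O(n) space.
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index of first occurrence
    for j, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], j]
        seen[value] = j
    return []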
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    """Configuration class for a LiLT model (parameter names follow `transformers.LiltConfig`)."""

    model_type = "lilt"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
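
# Note (editorial): `load_tool(..., remote=True)` runs the tool against a
# hosted inference endpoint rather than locally, so the *_remote tests above
# require network access.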
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
"""simple docstring"""
def counting_sort(collection):
    """Sort a collection of integers with counting sort.

    >>> counting_sort([4, 1, 3, 1])
    [1, 1, 3, 4]
    >>> counting_sort([])
    []
    """
    # if the collection is empty, return an empty list
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(counting_sort(unsorted))
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def __UpperCAmelCase ( self : Dict ) -> str:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        # Keep the fast pre-tokenizer in sync with the requested `add_prefix_space` setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ) -> None:
        # Blenderbot's mask token behaves like a normal word, absorbing the space before it.
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        # Blenderbot only appends the EOS token; a second sequence is ignored.
        return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = '  '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
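# --- Illustrative sketch (an addition, not part of the original module) ---
# Loading the fast tokenizer and checking the Blenderbot convention that
# encoded inputs end with a single </s>. Assumes network access to the
# "facebook/blenderbot-3B" checkpoint on the Hub.
def _example_blenderbot_fast_tokenizer():
    tokenizer = BlenderbotTokenizerFast.from_pretrained('facebook/blenderbot-3B' )
    input_ids = tokenizer('Hello, how are you?' ).input_ids
    # build_inputs_with_special_tokens only appends the EOS token.
    assert input_ids[-1] == tokenizer.eos_token_id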
| 4 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2_000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
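# --- Illustrative sketch (an addition, not part of the original module) ---
# Driving the pipeline above end to end. Assumes `diffusers` is installed and
# the "google/ncsnpp-celebahq-256" SDE-VE checkpoint is reachable; the full
# 2000-step sampling loop is slow on CPU, so prefer a GPU.
def _example_score_sde_ve():
    import torch
    from diffusers import ScoreSdeVePipeline

    pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-celebahq-256' )
    generator = torch.manual_seed(0 )
    image = pipe(num_inference_steps=2_000 , generator=generator ).images[0]
    image.save('sde_ve_sample.png' )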
| 357 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: """Sunday""",
    1: """Monday""",
    2: """Tuesday""",
    3: """Wednesday""",
    4: """Thursday""",
    5: """Friday""",
    6: """Saturday""",
}
def get_week_day(year , month , day ):
    """Returns the week-day name for a given Gregorian date (Doomsday algorithm)."""
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Note: a year divisible by 400 IS a leap year, hence `(year % 400) != 0` below.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
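# Worked example (an addition, not in the original module): 24 Oct 2020.
# century = 20 -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
# centurian = 20, centurian_m = 8 -> dooms_day = (1 + 8 + 2 + 2) % 7 = 6
# 2020 is a leap year, so day_anchor = DOOMSDAY_LEAP[9] = 3
# week_day = (6 + 24 - 3) % 7 = 6 -> "Saturday"
def _example_get_week_day():
    assert get_week_day(2_020 , 10 , 24 ) == """Saturday"""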
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234 | 0 |
def solution(limit : int = 1_0_0_0_0_0_0 ) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the number of
    reduced proper fractions with denominator <= limit."""
    # Sieve of Eratosthenes over the odd numbers.
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    # phi[n] starts at n and is scaled by (1 - 1/p) for every prime factor p.
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
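# --- Illustrative cross-check (an addition, not part of the original module) ---
# For a small limit the totient sum can be verified by counting coprime pairs
# directly; both values printed below should be 21 for limit = 8. Note the
# sieve works in floats, so `solution` relies on truncation staying benign.
def _example_solution():
    from math import gcd
    limit = 8
    brute_force = sum(
        1 for d in range(2 , limit + 1 ) for n in range(1 , d ) if gcd(n , d ) == 1
    )
    print(solution(limit ) , brute_force )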
| 274 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = """MobileNetV1Config"""
# Base docstring
_CHECKPOINT_FOR_DOC = """google/mobilenet_v1_1.0_224"""
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """google/mobilenet_v1_1.0_224"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """google/mobilenet_v1_1.0_224""",
    """google/mobilenet_v1_0.75_192""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model , config , tf_weights=None ):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model , config , tf_checkpoint_path ):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions." )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}" )
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping" )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("Transposing" )
            if len(pointer.shape ) == 2: # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
        pointer.data = torch.from_numpy(array )
        # Remove the consumed weight and its optimizer slots from the dict.
        tf_weights.pop(name , None )
        tf_weights.pop(name + "/RMSProp" , None )
        tf_weights.pop(name + "/RMSProp_1" , None )
        tf_weights.pop(name + "/ExponentialMovingAverage" , None )
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def apply_tf_padding(features , conv_layer ):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , "constant" , 0.0 )
class MobileNetVaConvLayer( nn.Module ):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode="zeros" , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features ):
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = '''mobilenet_v1'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = False
    def _init_weights( self , module ):
        if isinstance(module , (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
"""simple docstring"""
    def __init__( self , config , add_pooling_layer = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier) , config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier) , config.min_depth)
            # depthwise 3x3 convolution ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ))
            # ... followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state) , start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel ):
"""simple docstring"""
    def __init__( self , config ):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True)
        self.classifier = nn.Linear(last_hidden_size , config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze())
                else:
                    loss = loss_fct(logits , labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
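# --- Illustrative sketch (an addition, not part of the original module) ---
# Classifying one image with the pretrained checkpoint referenced in the
# docstrings above. Assumes `transformers`, `Pillow`, and network access; the
# public class name on the Hub is MobileNetV1ForImageClassification.
def _example_mobilenet_classification():
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    processor = AutoImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' )
    model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' )
    image = Image.new('RGB' , (224, 224) , color='gray' )
    inputs = processor(images=image , return_tensors='pt' )
    with torch.no_grad():
        logits = model(**inputs ).logits
    print(model.config.id2label[logits.argmax(-1 ).item()] )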
| 13 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """crop_size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
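    # --- Illustrative sketch (an addition, not part of the original tests) ---
    # The resize/center-crop behaviour asserted above, shown directly on one
    # image. Assumes `transformers` and `Pillow` are installed; the class name
    # follows this file's own import alias.
    def _example_processor_shapes( self ):
        from PIL import Image as PILImage
        processor = MobileNetVaImageProcessor(size={'shortest_edge': 20} , crop_size={'height': 18, 'width': 18} )
        image = PILImage.new('RGB' , (40, 30) , color='gray' )
        pixel_values = processor(image , return_tensors='pt' ).pixel_values
        # shortest edge resized to 20, then center-cropped to 18x18
        self.assertEqual(pixel_values.shape , (1, 3, 18, 18) )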
| 363 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer( SeqaSeqTrainer ):
"""simple docstring"""
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset = None , eval_examples = None , ignore_keys = None , metric_key_prefix = "eval" , **gen_kwargs , ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["""max_length"""] = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["""num_beams"""] = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"""{metric_key_prefix}_""" ):
                    metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys = None , metric_key_prefix = "test" , **gen_kwargs ):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
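# --- Illustrative sketch (an addition, not part of the original module) ---
# Minimal wiring of the trainer above; every argument is a placeholder the
# caller must supply (model, TrainingArguments, tokenized datasets, raw eval
# examples, and the two QA callbacks).
def _example_trainer_wiring(model , training_args , train_dataset , eval_dataset , eval_examples , post_processing_function , compute_metrics ):
    trainer = QuestionAnsweringSeqaSeqTrainer(
        model=model ,
        args=training_args ,
        train_dataset=train_dataset ,
        eval_dataset=eval_dataset ,
        eval_examples=eval_examples ,
        post_process_function=post_processing_function ,
        compute_metrics=compute_metrics ,
    )
    # generation kwargs flow through **gen_kwargs into evaluate()
    return trainer.evaluate(max_length=30 , num_beams=4 , metric_key_prefix='eval' )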
| 277 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__A = None
__A = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__A = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    _type: str = field(default="""Image""" , init=False , repr=False )
    def __call__( self ):
        return self.pa_type
    def encode_example( self , value ) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self , value , token_per_repo_id=None ) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten into a dictionary."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("""binary""" ),
                "path": Value("""string""" ),
            }
        )
    def cast_storage( self , storage ) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ) -> pa.StructArray:
        """Embed image files into the Arrow array."""
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ) -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image ) -> dict:
    if hasattr(image , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ) -> list:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
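# --- Illustrative sketch (an addition, not part of the original module) ---
# Round-tripping one uint8 array through the feature above. Assumes the
# `datasets`, `numpy`, and `Pillow` packages are installed.
def _example_image_feature_roundtrip():
    import numpy as np
    from datasets import Dataset, Features, Image

    array = (np.arange(16 * 16 * 3 ) % 256 ).astype("""uint8""" ).reshape(16 , 16 , 3 )
    ds = Dataset.from_dict({"""image""": [array]} , features=Features({"""image""": Image()} ) )
    decoded = ds[0]["""image"""] # decoded lazily into a PIL.Image.Image
    assert decoded.size == (16, 16)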
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
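# --- Illustrative note (an addition, not part of the original module) ---
# With the lazy module installed in sys.modules, importing a symbol such as
# BlipProcessor only triggers the real import of `processing_blip` on first
# attribute access. A hedged sketch of that behaviour:
def _example_lazy_import():
    import importlib
    blip = importlib.import_module('transformers.models.blip' )
    processor_cls = blip.BlipProcessor # resolved lazily via _LazyModule.__getattr__
    print(processor_cls.__name__ )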
| 164 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
    @property
    def dummy_tokenizer( self ):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(config )
    @property
    def dummy_text_proj( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs )
        return model
    @property
    def dummy_decoder( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            'sample_size': 32,
            # RGB in channels
            'in_channels': 3,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 6,
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': 'identity',
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_super_res_kwargs( self ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model
@property
    def dummy_super_res_last( self ):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model
    def get_dummy_components( self ):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
    def test_unclip_image_variation_input_tensor( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9_9_9_7,
                0.0_0_0_2,
                0.9_9_9_7,
                0.9_9_9_7,
                0.9_9_6_9,
                0.0_0_2_3,
                0.9_9_9_7,
                0.9_9_6_9,
                0.9_9_7_0,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: List[Any] = 'cpu'
lowercase__: str = self.get_dummy_components()
lowercase__: Union[str, Any] = self.pipeline_class(**lowerCamelCase_ )
lowercase__: List[str] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowercase__: List[Any] = self.get_dummy_inputs(lowerCamelCase_ , pil_image=lowerCamelCase_ )
lowercase__: Optional[int] = pipe(**lowerCamelCase_ )
lowercase__: Union[str, Any] = output.images
lowercase__: Dict = self.get_dummy_inputs(lowerCamelCase_ , pil_image=lowerCamelCase_ )
lowercase__: str = pipe(
**lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
lowercase__: List[str] = image[0, -3:, -3:, -1]
lowercase__: Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Dict = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Union[str, Any] = 'cpu'
lowercase__: str = self.get_dummy_components()
lowercase__: Optional[Any] = self.pipeline_class(**lowerCamelCase_ )
lowercase__: Optional[int] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowercase__: str = self.get_dummy_inputs(lowerCamelCase_ , pil_image=lowerCamelCase_ )
lowercase__: List[Any] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowercase__: Dict = pipe(**lowerCamelCase_ )
lowercase__: int = output.images
lowercase__: Optional[Any] = self.get_dummy_inputs(lowerCamelCase_ , pil_image=lowerCamelCase_ )
lowercase__: Tuple = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowercase__: Optional[Any] = pipe(
**lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
lowercase__: List[Any] = image[0, -3:, -3:, -1]
lowercase__: Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowercase__: str = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def test_unclip_passed_image_embed( self ) -> Optional[int]:
    '''simple docstring'''
    device = torch.device('cpu' )
    class DummyScheduler:
        init_noise_sigma = 1
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe = pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    generator = torch.Generator(device=device ).manual_seed(0 )
    dtype = pipe.decoder.dtype
    batch_size = 1
    shape = (
        batch_size,
        pipe.decoder.config.in_channels,
        pipe.decoder.config.sample_size,
        pipe.decoder.config.sample_size,
    )
    decoder_latents = pipe.prepare_latents(
        shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
    shape = (
        batch_size,
        pipe.super_res_first.config.in_channels // 2,
        pipe.super_res_first.config.sample_size,
        pipe.super_res_first.config.sample_size,
    )
    super_res_latents = pipe.prepare_latents(
        shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
    pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
    img_out_a = pipe(
        **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
    pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
    # Don't pass the image, instead pass its embedding
    image = pipeline_inputs.pop('image' )
    image_embeddings = pipe.image_encoder(image ).image_embeds
    img_out_b = pipe(
        **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
    # make sure passing the image embeddings manually is identical
    assert np.abs(img_out_a - img_out_b ).max() < 1E-4
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Any = torch_device == 'cpu'
# Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowercase__: str = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCamelCase_ , expected_max_diff=lowerCamelCase_ )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[Any] = torch_device == 'cpu'
lowercase__: Any = True
lowercase__: Dict = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , additional_params_copy_to_batched_inputs=lowerCamelCase_ , )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Any = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowercase__: List[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowerCamelCase_ , additional_params_copy_to_batched_inputs=lowerCamelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowerCamelCase_ )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def tearDown( self ) -> Tuple:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo( self ) -> Optional[Any]:
    '''simple docstring'''
    input_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
    expected_image = load_numpy(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
        '/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
    pipeline = UnCLIPImageVariationPipeline.from_pretrained(
        'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
    pipeline = pipeline.to(torch_device )
    pipeline.set_progress_bar_config(disable=None )
    generator = torch.Generator(device='cpu' ).manual_seed(0 )
    output = pipeline(
        input_image , generator=generator , output_type='np' , )
    image = output.images[0]
    assert image.shape == (256, 256, 3)
    assert_mean_pixel_difference(image , expected_image , 15 )
| 361 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
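# Optional extension (an addition, not part of the original snippet): seeding
# the pipeline with a torch.Generator makes the sample reproducible; the seed
# and file name below are illustrative.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seed0.png")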
| 288 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
"""simple docstring"""
import numpy as np
UpperCAmelCase : Optional[Any] = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.array(UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = np.where(letter == self.SQUARE )
__UpperCAmelCase : Optional[Any] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCamelCase__ ( self : Any , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : str = message.lower()
__UpperCAmelCase : List[Any] = message.replace(""" """ , """""" )
__UpperCAmelCase : List[Any] = message.replace("""j""" , """i""" )
__UpperCAmelCase : Optional[int] = np.empty((2, len(UpperCamelCase )) )
for letter_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : List[Any] = self.letter_to_numbers(message[letter_index] )
__UpperCAmelCase : str = numbers[0]
__UpperCAmelCase : int = numbers[1]
__UpperCAmelCase : Union[str, Any] = first_step.reshape(2 * len(UpperCamelCase ) )
__UpperCAmelCase : Optional[Any] = """"""
for numbers_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Any = int(second_step[numbers_index * 2] )
__UpperCAmelCase : Any = int(second_step[(numbers_index * 2) + 1] )
__UpperCAmelCase : str = self.numbers_to_letter(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = encoded_message + letter
return encoded_message
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = message.lower()
message.replace(""" """ , """""" )
__UpperCAmelCase : int = np.empty(2 * len(UpperCamelCase ) )
for letter_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Any = self.letter_to_numbers(message[letter_index] )
__UpperCAmelCase : Any = numbers[0]
__UpperCAmelCase : Dict = numbers[1]
__UpperCAmelCase : str = first_step.reshape((2, len(UpperCamelCase )) )
__UpperCAmelCase : Union[str, Any] = """"""
for numbers_index in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Optional[int] = int(second_step[0, numbers_index] )
__UpperCAmelCase : Tuple = int(second_step[1, numbers_index] )
__UpperCAmelCase : Tuple = self.numbers_to_letter(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = decoded_message + letter
return decoded_message
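# A minimal round-trip check for the cipher above (an addition; the message is
# illustrative and must avoid "j", which the square folds into "i").
if __name__ == "__main__":
    cipher = BifidCipher()
    ciphertext = cipher.encode("testmessage")
    assert cipher.decode(ciphertext) == "testmessage"
    print(ciphertext)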
| 115 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = FunnelTokenizer
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = 2
def __init__( self : Optional[int] , __UpperCAmelCase : int=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : List[str]="<sep>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : List[Any]="<cls>" , __UpperCAmelCase : List[Any]="<mask>" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Tuple="##" , **__UpperCAmelCase : Tuple , ) ->List[Any]:
"""simple docstring"""
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , clean_text=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , wordpieces_prefix=__UpperCAmelCase , **__UpperCAmelCase , )
a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars
):
a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) )
a = do_lower_case
a = strip_accents
a = tokenize_chinese_chars
a = normalizer_class(**__UpperCAmelCase )
a = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
    """simple docstring"""
    output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
    if token_ids_b:
        output += token_ids_b + [self.sep_token_id]
    return output
def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
    """simple docstring"""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
    return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
    """simple docstring"""
    files = self._tokenizer.model.save(save_directory , name=filename_prefix )
    return tuple(files )
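# A brief usage sketch (an addition, not part of the original file); it
# assumes network access to the Hugging Face Hub for the pretrained files.
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    # Expected layout: <cls> + sequence A + <sep> + sequence B + <sep>
    ids = tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
    print(ids)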
| 366 |
UpperCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 26 | 0 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound (m/s) in a fluid from its density (kg/m^3) and bulk modulus (Pa)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
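    # Example with illustrative values (an addition, not from the original
    # snippet): fresh water has a density of roughly 998 kg/m^3 and a bulk
    # modulus of roughly 2.15e9 Pa, giving a speed of sound near 1468 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))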
| 180 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 256 | 0 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Return the smallest non-negative x with x = r_1 (mod n_1) and x = r_2 (mod n_2)."""
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Same result as above, but built on modular inverses."""
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
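    # A concrete example (an addition): the smallest non-negative x with
    # x = 1 (mod 5) and x = 2 (mod 7) is 16.
    print(chinese_remainder_theorem(5, 1, 7, 2))  # 16
    print(chinese_remainder_theorem2(5, 1, 7, 2))  # 16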
| 243 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset of unknown length that stops at a random point.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self , lowercase , lowercase , lowercase=False , lowercase=True ) -> List[str]:
__UpperCamelCase = [
BatchSamplerShard(lowercase , 2 , lowercase , split_batches=lowercase , even_batches=lowercase )
for i in range(2 )
]
__UpperCamelCase = [list(lowercase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowercase ) for shard in batch_sampler_shards] , [len(lowercase ) for e in expected] )
self.assertListEqual(lowercase , lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase , lowercase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__UpperCamelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
__UpperCamelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase )
# Check the shards when the dataset is very small.
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowercase , lowercase )
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowercase , lowercase )
def __lowerCamelCase ( self ) -> Dict:
# Check the shards when the dataset is a round multiple of batch size.
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
# Check the shards when the dataset is not a round multiple of batch size.
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
# Check the shards when the dataset is very small.
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__UpperCamelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
# Check the shards when the dataset is very small.
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase )
__UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowercase , lowercase , even_batches=lowercase )
def __lowerCamelCase ( self ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
# Check the shards when the dataset is not a round multiple of batch size.
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
# Check the shards when the dataset is very small.
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
__UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowercase , lowercase , split_batches=lowercase , even_batches=lowercase )
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
__UpperCamelCase = [BatchSamplerShard(lowercase , 2 , lowercase , even_batches=lowercase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=False , lowercase=2 , lowercase=False ) -> List[str]:
random.seed(lowercase )
__UpperCamelCase = list(lowercase )
__UpperCamelCase = [
IterableDatasetShard(
lowercase , batch_size=lowercase , drop_last=lowercase , num_processes=lowercase , process_index=lowercase , split_batches=lowercase , )
for i in range(lowercase )
]
__UpperCamelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowercase )
iterable_dataset_lists.append(list(lowercase ) )
__UpperCamelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__UpperCamelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowercase ) , len(lowercase ) )
self.assertTrue(len(lowercase ) % shard_batch_size == 0 )
__UpperCamelCase = []
for idx in range(0 , len(lowercase ) , lowercase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowercase ) < len(lowercase ):
reference += reference
self.assertListEqual(lowercase , reference[: len(lowercase )] )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = 4_2
__UpperCamelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
# Edge case with a very small dataset
__UpperCamelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
self.check_iterable_dataset_shards(lowercase , lowercase , batch_size=4 , drop_last=lowercase , split_batches=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=lowercase )
__UpperCamelCase = SkipBatchSampler(lowercase , 2 )
self.assertListEqual(list(lowercase ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = DataLoader(list(range(1_6 ) ) , batch_size=4 )
__UpperCamelCase = skip_first_batches(lowercase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __lowerCamelCase ( self ) -> Tuple:
Accelerator()
__UpperCamelCase = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking-EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
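# A short usage sketch (an addition): a 1 kHz low-pass biquad at a 48 kHz
# sample rate, run over a few samples. This assumes the IIRFilter class
# imported above exposes a per-sample `process()` method, as in the
# accompanying iir_filter module.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    print([lowpass.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)])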
| 61 | 0 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Check that rows and columns are sorted in non-increasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary-search the index of the first negative number in `array`."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
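    # A small worked example (an addition): the grid below has three negative
    # entries, which the binary-search variant counts without a full scan.
    print(count_negatives_binary_search([[1, -1], [-1, -2]]))  # 3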
| 77 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of `n`."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of `n`."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of `n` via its string form."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 77 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = (UnCLIPScheduler,)
def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: Any ):
config = {
"""num_train_timesteps""": 10_00,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**UpperCamelCase_ )
return config
def lowerCAmelCase__ ( self: Optional[Any] ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase_ , prev_timestep=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowerCamelCase = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowerCamelCase = scheduler_class(**UpperCamelCase_ )
__lowerCamelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase_ ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=UpperCamelCase_ ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=UpperCamelCase_ ) - -0.001_0011 < 1E-5
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**UpperCamelCase_ )
__lowerCamelCase = scheduler.timesteps
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
__lowerCamelCase = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase_ ):
# 1. predict noise residual
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
__lowerCamelCase = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
__lowerCamelCase = pred_prev_sample
__lowerCamelCase = torch.sum(torch.abs(UpperCamelCase_ ) )
__lowerCamelCase = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(25 )
__lowerCamelCase = scheduler.timesteps
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
__lowerCamelCase = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase_ ):
# 1. predict noise residual
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ )
if i + 1 == timesteps.shape[0]:
__lowerCamelCase = None
else:
__lowerCamelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCamelCase = scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , prev_timestep=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
__lowerCamelCase = pred_prev_sample
__lowerCamelCase = torch.sum(torch.abs(UpperCamelCase_ ) )
__lowerCamelCase = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def lowerCAmelCase__ ( self: Any ):
pass
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
| 12 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
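

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of a checkpoint round trip with the helpers above. The
# `accelerator`, `model`, `optimizer` objects and the "ckpt" directory are
# hypothetical; it assumes an `Accelerator` launched with an FSDP plugin enabled.
#
#     accelerator = Accelerator()  # launched via an FSDP-enabled accelerate config
#     model, optimizer = accelerator.prepare(model, optimizer)
#     fsdp_plugin = accelerator.state.fsdp_plugin
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")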
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Runs a few checks to make sure the constraint can be stepped through and completed."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact, ordered sequence of token ids is generated."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint


class DisjunctiveTrie:
    """A simple trie over several token-id sequences; empty children mark complete sequences."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    """A constraint that is fulfilled by any one of several phrases (lists of token ids)."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint


class ConstraintListState:
    """A helper class to track progress through a list of constraints during generation."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process, so they remain in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
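

# --- Illustrative usage (not part of the original module) ---
# A small, self-contained sketch of how a decoding loop might drive these
# constraints by hand; the token ids below are made up for illustration.
if __name__ == "__main__":
    phrase = PhrasalConstraint([5, 9, 2])  # must generate 5, 9, 2 in order
    either = DisjunctiveConstraint([[1, 4], [1, 7, 8]])  # one of the two phrases

    state = ConstraintListState([phrase, either])
    for token_id in [5, 9, 2, 1, 4]:  # a hypothetical sampled sequence
        complete, stepped = state.add(token_id)
    print(state.completed)  # True: both constraints were fulfilled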
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
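

# --- Illustrative usage (not part of the original module) ---
# A quick sketch of constructing the config; the overridden values are arbitrary.
#
#     config = BioGptConfig(num_hidden_layers=6, hidden_size=512)
#     print(config.model_type, config.vocab_size)  # "biogpt" 42384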
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.tokenizer_class(self.vocab_file)
__lowercase =tokenizer.tokenize('UNwant\u00E9d,running')
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [9, 6, 7, 1_2, 1_0, 1_1])
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase =self.get_tokenizer()
__lowercase =self.get_rust_tokenizer()
__lowercase ='UNwant\u00E9d,running'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
__lowercase =rust_tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
__lowercase =rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =self.get_rust_tokenizer()
__lowercase =tokenizer.encode(_lowerCAmelCase)
__lowercase =rust_tokenizer.encode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
# With lower casing
__lowercase =self.get_tokenizer(do_lower_case=_lowerCAmelCase)
__lowercase =self.get_rust_tokenizer(do_lower_case=_lowerCAmelCase)
__lowercase ='UNwant\u00E9d,running'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
__lowercase =rust_tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
__lowercase =rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =self.get_rust_tokenizer()
__lowercase =tokenizer.encode(_lowerCAmelCase)
__lowercase =rust_tokenizer.encode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['hello', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hällo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =BasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=['[UNK]'])
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase ={}
for i, token in enumerate(_lowerCAmelCase):
__lowercase =i
__lowercase =WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])
def __lowerCamelCase ( self : str):
'''simple docstring'''
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
@slow
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('google/mobilebert-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])
    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
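

# --- Illustrative note (not part of the original test file) ---
# A minimal sketch of the greedy longest-match-first WordPiece lookup the tests
# above exercise, using the same toy vocabulary (hypothetical session):
#
#     from transformers.models.bert.tokenization_bert import WordpieceTokenizer
#
#     vocab = {"un": 0, "##want": 1, "##ed": 2, "runn": 3, "##ing": 4, "[UNK]": 5}
#     wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
#     print(wp.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']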
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
    """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
    """processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mctct"""] = [
        """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MCTCTForCTC""",
        """MCTCTModel""",
        """MCTCTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
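

# --- Illustrative note (not part of the original module) ---
# With `_LazyModule`, the heavy torch-backed submodules are only imported when
# one of their attributes is first accessed, e.g. (hypothetical session; the
# exact module path depends on the installed transformers version):
#
#     import transformers.models.deprecated.mctct as mctct  # cheap: nothing heavy imported yet
#     mctct.MCTCTConfig                                      # triggers the real import of configuration_mctct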
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
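

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of running the full pipeline on one image; the image itself
# is synthetic and the class name matches the reconstruction above.
#
#     import numpy as np
#     from PIL import Image
#
#     processor = CLIPImageProcessor()
#     image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)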
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
    },
    """tokenizer_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """camembert-base""": 512,
}

SPIECE_UNDERLINE = """▁"""


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
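

# --- Illustrative usage (not part of the original module) ---
# A quick sketch of the special-token layout built above (hypothetical session;
# requires the `camembert-base` checkpoint to be downloadable):
#
#     tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#     ids = tok.build_inputs_with_special_tokens([10, 11], [12])
#     # layout: <s> 10 11 </s> </s> 12 </s>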
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                } ), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'], )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"""{len(dest_layers)} != {len(layers_to_copy)}"""
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                f""" {n_student}""" )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""")
    elif n_teacher == n_student:
        return list(range(n_student))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
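

# --- Worked example (not part of the original script) ---
# For a 12-layer teacher distilled into a 3-layer student, the map above keeps
# the first, a middle, and the last layer; supervision pairs each student layer
# with a deeper teacher layer (values taken from the dicts above).
#
#     print(pick_layers_to_copy(3, 12))       # [0, 6, 11]
#     print(get_layers_to_supervise(3, 12))   # [3, 7, 11]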
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
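

# --- Illustrative invocation (not part of the original script) ---
# `fire` exposes the function above as a CLI; a hypothetical run shrinking a
# 12-layer BART teacher to a student with 12 encoder and 3 decoder layers:
#
#     python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3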
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def __lowerCamelCase ( self : int ) ->Optional[int]:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[str] = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCamelCase ( self : Tuple , A : Optional[Any] , A : int , A : Any , A : Any , *A : str ) ->List[Any]:
lowerCamelCase__ : Union[str, Any] = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
lowerCamelCase__ : Dict = model(A , token_type_ids=A , head_mask=A )
lowerCamelCase__ : Optional[int] = model(A , token_type_ids=A )
lowerCamelCase__ : Tuple = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self : str , A : str , A : List[str] , A : Optional[Any] , A : Optional[Any] , *A : Tuple ) ->List[Any]:
lowerCamelCase__ : Optional[Any] = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
lowerCamelCase__ : Any = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : List[Any] , A : int , A : Union[str, Any] , A : Optional[Any] , A : Optional[int] , *A : List[Any] ) ->Union[str, Any]:
lowerCamelCase__ : List[str] = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
lowerCamelCase__ : str = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : Optional[Any] , A : List[str] , A : int , A : Dict , A : Union[str, Any] , *A : str ) ->Union[str, Any]:
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[int] = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def __lowerCamelCase ( self : Optional[Any] , pipeline_test_casse_name : Tuple , config_class : int , model_architecture : Any , tokenizer_name : Optional[Any] , processor_name : List[str] ) ->Optional[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def __lowerCamelCase ( self : List[str] , inputs_dict : str , model_class : Any , return_labels : List[str]=False ) ->Dict:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
lowerCamelCase__ : Optional[int] = inputs_dict['''labels''']
lowerCamelCase__ : int = inputs_dict['''labels''']
lowerCamelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
lowerCamelCase__ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __lowerCamelCase ( self : int ) ->List[str]:
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=3_7 )
def __lowerCamelCase ( self : Union[str, Any] ) ->int:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Optional[int] ) ->str:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __lowerCamelCase ( self : Tuple ) ->str:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __lowerCamelCase ( self : int ) ->List[Any]:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __lowerCamelCase ( self : str ) ->Optional[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self : Any ) ->Union[str, Any]:
lowerCamelCase__ : Dict = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
lowerCamelCase__ : Tuple = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=A ) # the president is
lowerCamelCase__ : List[Any] = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : Tuple = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A )
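# --- Illustrative aside (not part of the test file above): a minimal version of the
# `ids_tensor` helper these testers rely on. The name and signature are assumptions
# based on common transformers test utilities, shown here only to clarify what the
# random-id batches used throughout the tester look like.
import torch

def ids_tensor(shape, vocab_size):
    # Sample token ids uniformly in [0, vocab_size) with the requested shape.
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

# e.g. a (batch_size=2, seq_length=7) batch over a 99-token vocabulary:
assert ids_tensor([2, 7], 99).shape == (2, 7)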
| 265 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
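# --- Illustrative aside: a standalone toy showing the calling convention of a
# `deprecate`-style helper like the one above. This is a simplified sketch
# (version checks omitted), not the actual diffusers implementation.
import warnings as _warnings

def _toy_deprecate(attribute, version_name, message, take_from=None):
    # Pop the deprecated kwarg if present, warn, and hand the value back.
    value = take_from.pop(attribute) if take_from and attribute in take_from else None
    _warnings.warn(
        f"The `{attribute}` argument is deprecated and will be removed in version {version_name}. {message}",
        FutureWarning,
        stacklevel=2,
    )
    return value

kwargs = {"old_arg": 3}
old = _toy_deprecate("old_arg", "0.30.0", "Use `new_arg` instead.", take_from=kwargs)
assert old == 3 and "old_arg" not in kwargs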
| 265 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : List[Any] =logging.get_logger(__name__)
def get_mobilenet_va_config(model_name) -> MobileNetVaConfig:
    """simple docstring"""
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = """background"""
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img() -> Any:
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False) -> None:
    """simple docstring"""
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_lowercase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase : List[str] =parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
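# --- Illustrative aside: how the model-name regex in get_mobilenet_va_config above
# decomposes a checkpoint name into depth multiplier and input resolution. The
# names below are examples only.
import re as _re

def parse_mobilenet_v1_name(model_name):
    matches = _re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    return (float(matches[1]), int(matches[2])) if matches else None

assert parse_mobilenet_v1_name("mobilenet_v1_1.0_224") == (1.0, 224)
assert parse_mobilenet_v1_name("mobilenet_v1_0.75_192") == (0.75, 192)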
| 170 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""OwlViTFeatureExtractor"""]
_A = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
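# --- Illustrative aside: the lazy-import idea behind `_LazyModule`, sketched with
# plain PEP 562 module-level __getattr__. This is an analogy for how attribute
# access can trigger the real import, not transformers' actual implementation;
# the attribute-to-module mapping below is made up for demonstration.
import importlib

_lazy_imports = {"dumps": "json", "sqrt": "math"}  # exported name -> providing module

def __getattr__(name):
    # Only runs when `name` is not found in this module's namespace; imports the
    # providing module on first access and returns the requested attribute.
    if name in _lazy_imports:
        module = importlib.import_module(_lazy_imports[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")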
| 171 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = MgpstrTokenizer
_lowerCAmelCase : int = False
_lowerCAmelCase : str = {}
_lowerCAmelCase : Tuple = False
def snake_case ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
snake_case = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
snake_case = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
def snake_case ( self , **lowerCAmelCase ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = 'tester'
snake_case = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
snake_case = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
snake_case = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case ,snake_case = self.get_input_output_texts(lowerCAmelCase )
snake_case = tokenizer.tokenize(lowerCAmelCase )
snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase )
snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
snake_case = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertNotEqual(len(lowerCAmelCase ) , 0 )
snake_case = tokenizer.decode(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(text_a.replace(' ' , '' ) , lowerCAmelCase )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case ( self ):
"""simple docstring"""
pass
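# --- Illustrative aside: the character-level vocabulary built in setUp above maps
# each symbol to a running index via dict(zip(...)), e.g.:
vocab = ["[GO]", "[s]", "0", "1", "a", "b"]
vocab_to_id = dict(zip(vocab, range(len(vocab))))
assert vocab_to_id["[GO]"] == 0 and vocab_to_id["a"] == 4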
| 149 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = """swinv2"""
_lowerCAmelCase : Any = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , lowerCAmelCase=2_24 , lowerCAmelCase=4 , lowerCAmelCase=3 , lowerCAmelCase=96 , lowerCAmelCase=[2, 2, 6, 2] , lowerCAmelCase=[3, 6, 12, 24] , lowerCAmelCase=7 , lowerCAmelCase=4.0 , lowerCAmelCase=True , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase="gelu" , lowerCAmelCase=False , lowerCAmelCase=0.02 , lowerCAmelCase=1E-5 , lowerCAmelCase=32 , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = embed_dim
snake_case = depths
snake_case = len(lowerCAmelCase )
snake_case = num_heads
snake_case = window_size
snake_case = mlp_ratio
snake_case = qkv_bias
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = drop_path_rate
snake_case = hidden_act
snake_case = use_absolute_embeddings
snake_case = layer_norm_eps
snake_case = initializer_range
snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case = int(embed_dim * 2 ** (len(lowerCAmelCase ) - 1) )
snake_case = (0, 0, 0, 0)
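# --- Illustrative aside: the hidden_size computed at the end of __init__ above.
# Each Swin stage doubles the channel dimension, so with embed_dim=96 and four
# stages the channel dimension after the last stage is 96 * 2**(4 - 1) = 768.
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768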
| 149 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: List[str] = FlaxAutoencoderKL
@property
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Optional[int] = 4
_UpperCAmelCase : str = 3
_UpperCAmelCase : int = (32, 32)
_UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
_UpperCAmelCase : Optional[Any] = jax.random.uniform(A , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _A ( self : Tuple ):
_UpperCAmelCase : Tuple = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
_UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
| 31 |
def is_power_of_two(number):
    """
    Return True if ``number`` is a power of two (zero also returns True).

    >>> is_power_of_two(16)
    True
    >>> is_power_of_two(10)
    False
    """
    if number < 0:
        raise ValueError("""number must not be negative""")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
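# Quick demonstration of the bit trick used above: a power of two has exactly one
# set bit, so n & (n - 1) clears it and the result is zero only for powers of two.
assert is_power_of_two(1) and is_power_of_two(64)
assert not is_power_of_two(3) and not is_power_of_two(6)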
| 26 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase__ : List[str] = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase__ : str = {
"yjernite/retribert-base-uncased": 5_12,
}
lowerCAmelCase__ : Dict = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = RetriBertTokenizer
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any="[UNK]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Tuple="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : List[str]="[MASK]" , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase_ ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_ ) != tokenize_chinese_chars
):
__UpperCAmelCase : Tuple = getattr(UpperCAmelCase_ , normalizer_state.pop("type" ) )
__UpperCAmelCase : int = do_lower_case
__UpperCAmelCase : Optional[Any] = strip_accents
__UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars
__UpperCAmelCase : Optional[int] = normalizer_class(**UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]=None ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
__UpperCAmelCase : List[str] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
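# --- Illustrative aside: the sequence layout the two methods above produce for a
# BERT-style pair, [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first
# segment and 1 over the second. The ids below are made up for illustration.
cls_id, sep_id = 101, 102
token_ids_a, token_ids_b = [7, 8], [9]
input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
assert input_ids == [101, 7, 8, 102, 9, 102]
assert token_type_ids == [0, 0, 0, 0, 1, 1]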
| 362 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ : Dict = get_tests_dir("fixtures/dummy-config.json")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = 0
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Any = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCAmelCase : int = os.path.join(UpperCAmelCase_ , "fake-roberta" )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register("custom" , UpperCAmelCase_ )
# Wrong model type will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register("model" , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register("bert" , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCAmelCase : List[str] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , "bert-base is not a local folder and is not a valid model identifier" ):
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__UpperCAmelCase : int = AutoConfig.from_pretrained(UpperCAmelCase_ , revision="aaaaaa" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
__UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
__UpperCAmelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''new-model'''
try:
AutoConfig.register("new-model" , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
__UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
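# --- Illustrative aside: the register/round-trip pattern exercised above, in
# isolation. "my-model" and MyConfig are hypothetical names for demonstration;
# this sketch assumes transformers is installed.
import tempfile
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

AutoConfig.register("my-model", MyConfig)
config = MyConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = AutoConfig.from_pretrained(tmp_dir)
assert isinstance(reloaded, MyConfig)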
| 37 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
def get_config(model_name) -> BitConfig:
    '''simple docstring'''
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name) -> str:
    '''simple docstring'''
    if "stem.conv" in name:
        name = name.replace('''stem.conv''', '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''', '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''', '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img() -> Any:
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False ) -> None:
    '''simple docstring'''
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image, return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''', logits[0, :3] )
    print('''Predicted class:''', model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub" )
        model.push_to_hub(f"ybelkada/{model_name}" )
        processor.push_to_hub(f"ybelkada/{model_name}" )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
a : Any = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
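# --- Illustrative aside: the timm -> HF key renaming performed by rename_key
# above, applied to a toy state dict (tensors replaced by ints for brevity).
toy_state_dict = {"stem.conv.weight": 1, "blocks.0.conv1.weight": 2, "head.fc.weight": 3}
renamed = {rename_key(k): v for k, v in toy_state_dict.items()}
assert "bit.embedder.convolution.weight" in renamed
assert "bit.encoder.layers.0.conv1.weight" in renamed
assert "classifier.1.weight" in renamed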
| 56 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
SCREAMING_SNAKE_CASE__ = 2_048
SCREAMING_SNAKE_CASE__ = 4_096
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = os.environ.pop("PROCESS_TRAIN", "false")
SCREAMING_SNAKE_CASE__ = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example) -> Any:
    """simple docstring"""
    def choose_first(answer, is_long_answer=False ):
        assert isinstance(answer, list )
        if len(answer ) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a['start_token'] ) > 0:
                break
        return a

    answer = {'id': example['id']}
    annotation = example['annotations']
    yes_no_answer = annotation['yes_no_answer']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer['category'] = ['yes'] if 1 in yes_no_answer else ['no']
        answer['start_token'] = answer['end_token'] = []
        answer['start_byte'] = answer['end_byte'] = []
        answer['text'] = ['<cls>']
    else:
        answer['category'] = ['short']
        out = choose_first(annotation['short_answers'] )
        if len(out['start_token'] ) == 0:
            # answer will be long if short is not available
            answer['category'] = ['long']
            out = choose_first(annotation['long_answer'] , is_long_answer=True )
            out['text'] = []
        answer.update(out )
    # disregard some samples
    if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
        answer['remove_it'] = True
    else:
        answer['remove_it'] = False
    cols = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
    if not all(isinstance(answer[k] , list ) for k in cols ):
        raise ValueError('Issue in ID' , example['id'] )
    return answer
def get_context_and_ans(example , assertion=False ) -> Union[str, Any]:
"""simple docstring"""
    answer = _get_single_answer(example )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case = example['document']['tokens']
snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
snake_case = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
snake_case = example['document']['tokens']
snake_case = answer['start_token']
snake_case = answer['end_token']
snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
snake_case = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
snake_case = doc['is_html'][answer['start_token'] : answer['end_token']]
snake_case = doc['token'][answer['start_token'] : answer['end_token']]
snake_case = ' '.join([old[i] for i in range(len(_UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , _UpperCamelCase , end='\n' )
print('Old:' , _UpperCamelCase , end='\n\n' )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=True ) -> Optional[Any]:
"""simple docstring"""
    out = get_context_and_ans(example , assertion=assertion )
    answer = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
snake_case = tokenizer(example['question']['text'] , out['context'] ).input_ids
snake_case = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case = []
snake_case = []
snake_case = input_ids[:q_len]
snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
snake_case = i + max_length - q_len
snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(_UpperCamelCase ),
"end_token": [-1_0_0] * len(_UpperCamelCase ),
"category": category,
},
}
snake_case = out['context'].split()
snake_case = splitted_context[answer['end_token']]
snake_case = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=_UpperCamelCase , ).input_ids )
snake_case = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=_UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
snake_case = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
snake_case = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
snake_case = answer['start_token']
snake_case = answer['end_token']
if assertion:
snake_case = tokenizer.decode(_UpperCamelCase )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , _UpperCamelCase , end='\n\n' )
if len(_UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
snake_case = input_ids[:q_len]
snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
snake_case = []
snake_case = []
snake_case = []
snake_case = [] # null, yes, no, long, short
for i in doc_start_indices:
snake_case = i + max_length - q_len
snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
snake_case = start_token - i + q_len
snake_case = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
snake_case = -1_0_0
snake_case = -1_0_0
answers_category.append('null' )
snake_case = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_UpperCamelCase )
answers_end_token.append(_UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(_UpperCamelCase ) )
print('Old:' , tokenizer.decode(_UpperCamelCase ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=False ) -> Union[str, Any]:
    """simple docstring"""
    example = get_strided_contexts_and_ans(
        example , tokenizer , doc_stride=doc_stride , max_length=max_length , assertion=assertion , )
    return example
def save_to_disk(hf_data , file_name ) -> Any:
    """simple docstring"""
    with jsonlines.open(file_name , 'a' ) as writer:
        for example in tqdm(hf_data , total=len(hf_data ) , desc='Saving samples ... ' ):
            labels = example['labels']
            for ids, start, end, cat in zip(
                example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" (no-answer) samples
                writer.write(
                    {
                        'input_ids': ids,
                        'start_token': start,
                        'end_token': end,
                        'category': CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
SCREAMING_SNAKE_CASE__ = load_dataset("natural_questions")
SCREAMING_SNAKE_CASE__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
SCREAMING_SNAKE_CASE__ = data["train" if PROCESS_TRAIN == "true" else "validation"]
SCREAMING_SNAKE_CASE__ = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
SCREAMING_SNAKE_CASE__ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
SCREAMING_SNAKE_CASE__ = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
SCREAMING_SNAKE_CASE__ = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
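# --- Illustrative aside (toy numbers, not from the script above): the strided
# windowing used in get_strided_contexts_and_ans. Window starts advance by
# (max_length - doc_stride) tokens; each window carries the question prefix of
# length q_len plus up to (max_length - q_len) context tokens.
q_len, total_len, max_length, doc_stride = 10, 100, 40, 20
doc_start_indices = list(range(q_len, total_len, max_length - doc_stride))
windows = [
    list(range(q_len)) + list(range(i, min(i + max_length - q_len, total_len)))
    for i in doc_start_indices
]
assert doc_start_indices == [10, 30, 50, 70, 90]
assert all(len(w) <= max_length for w in windows)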
| 150 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE_ ( *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : Optional[Any] = image_classifier(UpperCAmelCase_ , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase_) , [
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}],
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}],
] , )
a : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Union[str, Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
a : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : Tuple = image_classifier(UpperCAmelCase_ , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , )
a : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
[
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
{'score': 0.3_33, 'label': ANY(UpperCAmelCase_)},
],
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[Any] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : Optional[Any] = image_classifier(UpperCAmelCase_ , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
a : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : Dict = image_classifier(UpperCAmelCase_ , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
a : int = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(UpperCAmelCase_) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
            * 5 , )
| 363 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
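# --- Illustrative aside: the property above multiplies the feature-extractor
# strides, giving the overall audio downsampling factor. With the default strides:
import math
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320  # one feature frame per 320 input samples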
| 345 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
A : ClassVar[Features] = Features({'''audio''': Audio()} )
A : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
A : str = "audio"
A : str = "transcription"
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features." )
if not isinstance(features[self.audio_column], A ):
raise ValueError(F"Column {self.audio_column} is not an Audio type." )
SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.input_schema.copy()
SCREAMING_SNAKE_CASE : List[Any] = features[self.audio_column]
SCREAMING_SNAKE_CASE : Union[str, Any] = input_schema
return task_template
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 251 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 | 1 |
def count_divisors(n) -> int:
    """Count the divisors of ``n`` via its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_0_0:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
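# Quick demonstration of the divisor-counting rule used above: 28 = 2**2 * 7, so
# it has (2 + 1) * (1 + 1) = 6 divisors, namely 1, 2, 4, 7, 14 and 28.
assert count_divisors(28) == 6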
| 276 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
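

# Hedged example (added): floats_list((2, 3)) returns a 2x3 nested list of
# uniform floats scaled by `scale`; pass rng=random.Random(0) for a
# reproducible draw.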
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_1 = feat_extract_first.mel_filters
            mel_2 = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(mel_1, mel_2))
            self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_1 = feat_extract_first.mel_filters
            mel_2 = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(mel_1, mel_2))
            self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
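

# Hedged usage note (added; the exact test path is an assumption about the
# repository layout):
#   python -m pytest tests/models/whisper/test_feature_extraction_whisper.py -q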
| 276 | 1 |
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> new_list.insert(1)
    >>> new_list.insert(2)
    >>> str(new_list)
    '1 2'
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
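
    # Hedged demo (added; assumed usage of the classes above).
    demo = LinkedList()
    for value in (1, 2, 3):
        demo.insert(value)
    demo.delete_value(2)
    print(demo)  # -> "1 3"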
| 281 |
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x1000 grid whose rows and columns decrease monotonically."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, shrinking the bound as rows descend."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives per row, breaking at the first one since rows are sorted."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
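

# Hedged cross-check (added): the three counting strategies must agree on the
# small fixture grids (the large generated grid is skipped to keep import fast).
for _small_grid in test_grids[:-1]:
    assert (
        count_negatives_binary_search(_small_grid)
        == count_negatives_brute_force(_small_grid)
        == count_negatives_brute_force_with_break(_small_grid)
    )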
def benchmark() -> None:
    """Benchmark the counting functions against each other."""
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F'''{func}(grid=grid)''', setup=setup, number=500)
        print(F'''{func}() took {time:0.4f} seconds''')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 285 | 0 |
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
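

# Note (added): `search` is a best-first expansion ordered by f = g + heuristic;
# with an admissible heuristic this behaves like A* on the unit-cost grid.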
if __name__ == "__main__":
lowerCAmelCase__ : int =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCAmelCase__ : Tuple =[0, 0]
# all coordinates are given in format [y,x]
lowerCAmelCase__ : Any =[len(grid) - 1, len(grid[0]) - 1]
lowerCAmelCase__ : Union[str, Any] =1
# the cost map which pushes the path closer to the goal
lowerCAmelCase__ : Tuple =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCAmelCase__ : Optional[Any] =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCAmelCase__ : Optional[int] =99
lowerCAmelCase__ : int =search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 365 |
import socket
def main() -> None:
    """Receive a file from a server over a plain TCP socket and save it locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
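

# Hedged counterpart sketch (added; the port and filename are assumptions): a
# matching sender could look like
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(5)
#   conn, _addr = server.accept()
#   with open("file_to_send", "rb") as in_file:
#       conn.sendfile(in_file)
#   conn.close()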
if __name__ == "__main__":
main()
| 162 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
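

# Hedged usage sketch (added; the checkpoint name is an assumption):
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)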
| 91 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
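

# Note (added): as exercised above, `is_safetensors_compatible` requires every
# `.bin` weight to have a `.safetensors` sibling, and the fp16 cases follow the
# `<name>.<variant>.safetensors` naming convention.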
| 91 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
UpperCAmelCase : Union[str, Any] = F"https://www.google.com/search?q={query}&num=100"
UpperCAmelCase : Tuple = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
UpperCAmelCase : List[Any] = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
UpperCAmelCase : Any = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 313 |
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the bead (gravity) sort algorithm."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
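
# Hedged extra check (added): zeros and gaps are handled as well.
assert bead_sort([5, 0, 4, 3]) == [0, 3, 4, 5]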
| 313 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
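

# Hedged usage sketch (added; the surrounding agents/tools API is assumed):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))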
| 296 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("""_""")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""")
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""", """embeddings.norm""")
    if "layers" in name:
        name = """encoder.""" + name
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")

    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""

    if "head" in name:
        name = name.replace("""head""", """classifier""")
    else:
        name = """swin.""" + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""

    image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""", """-""")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="""pt""")

    timm_outs = timm_model(inputs["""pixel_values"""])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
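

# Hedged CLI example (added; the script filename and output path are assumptions):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224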
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 296 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''CLIPImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> Union[str, Any]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs) -> Tuple:
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs) -> str:
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs) -> List[str]:
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self) -> Dict:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self) -> Optional[int]:
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self) -> Optional[int]:
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            FutureWarning,
        )
        return self.image_processor
| 353 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the ``max_n``-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
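

# Hedged sanity check (added): the 10th convergent of e is 1457/536, so the
# digit sum of its numerator is 1 + 4 + 5 + 7 = 17.
assert solution(10) == 17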
if __name__ == "__main__":
print(F'''{solution() = }''')
| 338 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
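

# Hedged usage sketch (added; the checkpoint name is an assumption):
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tokenizer("Hello world!", return_tensors="np")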
| 73 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            179459,
            124850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            10114,
            711,
            152,
            20,
            6,
            5,
            22376,
            642,
            1221,
            15190,
            34153,
            450,
            5608,
            959,
            1119,
            57702,
            136,
            186,
            47,
            1098,
            29367,
            47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            50901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `lowerCAmelCase_` above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase_,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Constructs an InstructBLIP processor wrapping an image processor, an LLM tokenizer and a Q-Former tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            # store the Q-Former encoding under dedicated keys so it does not clash with the main tokenizer output
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the main tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the main tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    # overwrite to save the Q-Former tokenizer in a dedicated subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    # overwrite to load the Q-Former tokenizer back from its subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
return cls(*lowerCamelCase ) | 353 |
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
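# These placeholders can be imported without the "speech" extra installed, but
# instantiating one raises an informative error about the missing backend, e.g.:
#
#   extractor = ASTFeatureExtractor()  # raises unless the `transformers[speech]` deps are present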
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
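    # Sanity check against the closed form: P(X = 2) for X ~ Binomial(4, 0.75) is
    # C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375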
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
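# Note: ipex.optimize(dtype=torch.bfloat16) only prepares the module weights; the
# torch.cpu.amp.autocast block above is still needed so the ops actually run in bf16.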
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """Overriding the default behavior of the mask token to have it eat the space before it."""
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create a mask of zeros: RoBERTa does not make use of token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
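    # Quick check of the special-token layout (assumes hub access; ids shown are for roberta-base):
    #
    #   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    #   tok.build_inputs_with_special_tokens([100, 200])    # [0, 100, 200, 2]  i.e. <s> ... </s>
    #   tok.build_inputs_with_special_tokens([100], [200])  # [0, 100, 2, 2, 200, 2]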
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label).

    Each example becomes two rows: [start] story [delimiter] continuation [clf] for both continuations.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
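# For N examples, pre_process_datasets therefore yields tensors of shape (N, 2, input_len)
# for input_ids/lm_labels and (N, 2) for mc_token_ids: each example carries both candidate
# continuations side by side, and mc_labels marks the correct one.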
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
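# Typical invocation on ROCStories (script and file names are illustrative):
#   python run_openai_gpt.py --do_train --do_eval --output_dir out/ \
#       --train_dataset cloze_test_val__spring2016.csv \
#       --eval_dataset cloze_test_test__spring2016.csv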
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as spare."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print("moving disk from", fp, "to", tp)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
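# A tower of height n takes exactly 2**n - 1 moves, so e.g. entering 3 prints 7 moves.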
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from input sample x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter by passing an impulse through it."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter by passing an impulse through it."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
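# Example usage (assumes a filter object implementing the FilterType protocol, e.g. an
# IIR filter exposing a `process(sample)` method):
#
#   show_frequency_response(my_filter, 48000)
#   show_phase_response(my_filter, 48000)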
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
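# The "positions" output carries a leading trajectory dimension (8 structure-module
# iterations in this test), then (batch, seq_len, 14, 3): up to 14 atom coordinates
# per residue in 3-D.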
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
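# For task="multiple-choice" the exported ONNX graph declares three dynamic axes
# (batch, choice, sequence) on each input; all other tasks only need (batch, sequence).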
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE ={'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `SCREAMING_SNAKE_CASE` above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
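# Example invocation (script and paths are illustrative):
#   python convert_gpt2_checkpoint.py --gpt2_checkpoint_path /path/to/tf_ckpt \
#       --pytorch_dump_folder_path /path/to/out --gpt2_config_file gpt2_config.json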
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def A_ ( self : Tuple ):
return self.sp_model.get_piece_size()
def A_ ( self : int ):
snake_case_ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Optional[int] , lowercase_ : Optional[Any] ):
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self : int , lowercase_ : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def A_ ( self : Optional[int] , lowercase_ : Tuple ):
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE_ )
def A_ ( self : Union[str, Any] , lowercase_ : List[Any] ):
snake_case_ = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
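
# Minimal usage sketch ("spiece.model" is an assumed local SentencePiece file,
# e.g. one downloaded from the google/bigbird-roberta-base repo):
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer("spiece.model")
    encoded = tokenizer("Paris is the [MASK] of France.")["input_ids"]
    print(tokenizer.decode(encoded))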
| 355 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
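
# Standalone usage sketch mirroring the slow test above (downloads pretrained
# weights from the Hub; a GPU is strongly recommended for the SDE sampler):
if __name__ == "__main__":
    model_id = "google/ncsnpp-church-256"
    pipe = ScoreSdeVePipeline(
        unet=UNet2DModel.from_pretrained(model_id),
        scheduler=ScoreSdeVeScheduler.from_pretrained(model_id),
    )
    pipe.to(torch_device)
    images = pipe(num_inference_steps=10, output_type="numpy", generator=torch.manual_seed(0)).images
    print(images.shape)  # (1, 256, 256, 3)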
| 72 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
snake_case_ :Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
snake_case_ :str = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
snake_case_ :Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
snake_case_ :List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
snake_case_ :Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
snake_case_ :Union[str, Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
snake_case_ :str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
snake_case_ :List[Any] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def lowerCAmelCase_ ( self: Any ) -> str:
snake_case_ :Optional[int] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
snake_case_ :List[str] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(snake_case , snake_case )
@slow
@require_torch
def lowerCAmelCase_ ( self: Dict ) -> Dict:
snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(snake_case )
@slow
@require_tf
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[Any] ) -> Union[str, Any]:
snake_case_ :Optional[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case ) , [
{"""sequence""": """My name is John""", """score""": 0.0_0_8, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.0_0_7, """token""": 1_573, """token_str""": """ Chris"""},
] , )
snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.2_5_1,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.2_1_4,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
snake_case_ :int = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.0_0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.0_0_0, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.0_0_0, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_ :str = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
snake_case_ :Any = None
snake_case_ :Tuple = None
self.run_pipeline_test(snake_case , [] )
@require_tf
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
snake_case_ :int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
snake_case_ :List[str] = None
snake_case_ :List[Any] = None
self.run_pipeline_test(snake_case , [] )
def lowerCAmelCase_ ( self: List[Any] , snake_case: int , snake_case: Tuple , snake_case: Optional[int] ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :str = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] , snake_case: Tuple ) -> Union[str, Any]:
snake_case_ :Any = fill_masker.tokenizer
snake_case_ :List[Any] = fill_masker.model
snake_case_ :int = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Optional[int] = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Union[str, Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
snake_case , [
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
] , )
with self.assertRaises(snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(snake_case ):
fill_masker("""This is""" )
self.run_test_top_k(snake_case , snake_case )
self.run_test_targets(snake_case , snake_case )
self.run_test_top_k_targets(snake_case , snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case , snake_case )
self.fill_mask_with_multiple_masks(snake_case , snake_case )
def lowerCAmelCase_ ( self: Any , snake_case: int , snake_case: int ) -> int:
snake_case_ :List[str] = tokenizer.get_vocab()
snake_case_ :Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case_ :Any = FillMaskPipeline(model=snake_case , tokenizer=snake_case , targets=snake_case )
snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , snake_case )
snake_case_ :Optional[int] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) )
# Call argument
snake_case_ :int = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , snake_case )
snake_case_ :Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) )
# Score equivalence
snake_case_ :Tuple = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
snake_case_ :Any = [top_mask["""token_str"""] for top_mask in outputs]
snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ) == set(snake_case ):
snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
# Raises with invalid
with self.assertRaises(snake_case ):
snake_case_ :Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case ):
snake_case_ :int = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
with self.assertRaises(snake_case ):
snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: List[str] ) -> Union[str, Any]:
snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case , top_k=2 )
snake_case_ :str = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
snake_case_ :Optional[int] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
] , )
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def lowerCAmelCase_ ( self: Any , snake_case: List[str] , snake_case: List[Any] ) -> Tuple:
snake_case_ :Tuple = tokenizer.get_vocab()
snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
# top_k=2, ntargets=3
snake_case_ :List[Any] = sorted(vocab.keys() )[:3]
snake_case_ :Any = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
snake_case_ :str = [el["""token_str"""] for el in sorted(snake_case , key=lambda snake_case : x["score"] , reverse=snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ).issubset(snake_case ):
snake_case_ :Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] , snake_case: Dict ) -> Tuple:
snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Dict = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case_ :Optional[Any] = sorted(vocab.keys() )[:3]
snake_case_ :Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case_ :str = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(snake_case ) , 3 )
def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict ) -> int:
snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
snake_case_ :Dict = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
[
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
{"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )},
],
] , )
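
# Minimal sketch of the pipeline these tests exercise (fetches a tiny model from
# the Hub when run directly; requires torch for the "pt" framework):
if __name__ == "__main__":
    unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
    print(unmasker("My name is <mask>"))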
| 66 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
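
    # Follow-up sketch: a quick error metric for the forecasts above. `pred` and
    # `y_test` both have shape (samples, forward_days) and are still MinMax-scaled,
    # so apply the scaler's inverse_transform before reporting real-unit errors.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"Test RMSE (scaled units): {rmse:.4f}")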
| 66 | 1 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the non-Mersenne prime 28433 * 2^7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
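
# Why the three-argument pow: it exponentiates modulo 10**n, so the roughly
# 2.36-million-digit value of 2**7830457 is never materialized. Small sanity check:
assert pow(2, 10, 1000) == (2**10) % 1000 == 24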
| 54 | """simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
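
    # For a GHZ-style entangled register only the all-zeros and all-ones
    # bitstrings should appear, each with roughly half of the 1000 shots,
    # e.g. {'000': 497, '111': 503} (exact counts vary from run to run).
    counts = quantum_entanglement(3)
    assert set(counts) <= {"000", "111"}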
| 54 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
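
# Consumer-side sketch of what the lazy structure buys: importing the package is
# cheap, and the heavy torch/flax modules load only on first attribute access.
#
#     from transformers.models.beit import BeitConfig  # no torch import yet
#     config = BeitConfig()                             # resolved lazily by _LazyModule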
| 5 |
'''simple docstring'''
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list):
        """Inserts a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        """Inserts a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Returns True if the word is in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        """Deletes a word in the Trie."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool):
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests():
    assert test_trie()


def main():
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
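
    # Extra usage sketch: insert/find cost O(len(word)) regardless of how many
    # words are already stored.
    demo = TrieNode()
    demo.insert_many(["car", "card", "care"])
    print(demo.find("card"))  # True
    print(demo.find("ca"))    # False -- "ca" is only a prefix, not a stored word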
| 34 | 0 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
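
# Hedged usage sketch: the JIT build needs a CUDA toolkit plus the kernel sources
# shipped with transformers, so guard the call and fall back gracefully.
if __name__ == "__main__":
    try:
        MSDA = load_cuda_kernels()
    except Exception as exc:  # toolchain/build errors are common here
        MSDA = None
        print(f"Could not compile the deformable-attention kernels: {exc}")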
| 350 |
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited) -> None:
        # Checking all 8 elements surrounding the current cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
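
# Usage sketch on the classic 5x5 grid (1 = land, 0 = water); with 8-directional
# connectivity this grid contains exactly 5 islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Graph(len(grid), len(grid[0]), grid).count_islands())  # 5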
| 335 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299 |
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
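
    # Usage sketch (the LeetCode 983 example): a 1-day ticket on day 1, a 7-day
    # pass covering days 4-8, and a 1-day ticket on day 20 is cheapest.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11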
| 299 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
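
# Compatibility note and usage sketch: this script targets the pre-1.0 TensorFlow
# API (tf.sub, tf.initialize_all_variables, tf.Session); on TF 1.x substitute
# tf.subtract and tf.global_variables_initializer, and TF 2.x additionally needs
# the tf.compat.v1 layer with eager execution disabled.
#
#     data = [array([0.0, 0.0]), array([0.1, 0.0]), array([5.0, 5.0]), array([5.1, 5.0])]
#     centroids, assignments = TFKMeansCluster(data, 2)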
| 269 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 269 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A Node has a value and pointers to its left and right children."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums all node values in a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCamelCase :Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__(self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , lowercase = None , **lowercase , ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = 1
elif isinstance(lowercase , lowercase ):
A_ : Any = len(lowercase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowercase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowercase )}.' )
# get prompt text embeddings
A_ : Optional[Any] = self.tokenizer(
lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
A_ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_, A_, A_ : Tuple = text_embeddings.shape
A_ : Optional[Any] = text_embeddings.repeat(1 , lowercase , 1 )
A_ : Any = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : Optional[int] = [""""""]
elif type(lowercase ) is not type(lowercase ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowercase )} !='
F' {type(lowercase )}.' )
elif isinstance(lowercase , lowercase ):
A_ : Dict = [negative_prompt]
elif batch_size != len(lowercase ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowercase )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
A_ : Dict = negative_prompt
A_ : int = text_input_ids.shape[-1]
A_ : List[Any] = self.tokenizer(
lowercase , padding="""max_length""" , max_length=lowercase , truncation=lowercase , return_tensors="""pt""" , )
A_ : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Optional[Any] = uncond_embeddings.shape[1]
A_ : str = uncond_embeddings.repeat(lowercase , lowercase , 1 )
A_ : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
lowercase , generator=lowercase , device="""cpu""" , dtype=lowercase ).to(self.device )
A_ : int = torch.randn(lowercase , generator=lowercase , device="""cpu""" , dtype=lowercase ).to(
self.device )
else:
A_ : int = torch.randn(
lowercase , generator=lowercase , device=self.device , dtype=lowercase )
A_ : str = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
A_ : str = latents_reference.to(self.device )
A_ : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : Optional[int] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : int = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : Optional[int] = max(-dx , 0 )
A_ : List[str] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A_ : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : Any = {}
if accepts_eta:
A_ : Optional[int] = eta
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
A_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Tuple = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
A_ : List[str] = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
A_, A_ : str = noise_pred.chunk(2 )
A_ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : List[str] = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i , t , latents )
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents ).sample
image = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="""pt""" ).to(
self.device )
image, has_nsfw_concept = self.safety_checker(
images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept ) | 206 | 0 |
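# A minimal, runnable sketch of the classifier-free guidance combine used in the
# denoising loop above; the toy tensor shape and guidance_scale value are
# illustrative assumptions, not values taken from the pipeline.
import torch
torch.manual_seed(0 )
noise_pred = torch.randn(2 , 4 , 8 , 8 ) # stacked [uncond, text] predictions
guidance_scale = 7.5
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape ) # torch.Size([1, 4, 8, 8])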
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line ):
"""simple docstring"""
search = _re_indent.search(line )
return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
"""simple docstring"""
index = 0
lines = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(start_prompt ):
index += 1
blocks = ["""\n""".join(lines[:index] )]
else:
blocks = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
current_block = [lines[index]]
index += 1
while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(current_block ) )
if index < len(lines ) - 1:
current_block = [lines[index + 1]]
index += 1
else:
current_block = []
else:
blocks.append("""\n""".join(current_block ) )
current_block = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(current_block ) > 0:
blocks.append("""\n""".join(current_block ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lines ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key ):
"""simple docstring"""
def _inner(x ):
return key(x ).lower().replace("""_""" , """""" )
return _inner
def sort_objects(objects , key=None ):
"""simple docstring"""
def noop(x ):
return x
if key is None:
key = noop
# Constants are all uppercase, they go first.
constants = [obj for obj in objects if key(obj ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
# Functions begin with a lowercase, they go last.
functions = [obj for obj in objects if not key(obj )[0].isupper()]
key1 = ignore_underscore(key )
return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement ):
"""simple docstring"""
def _replace(match ):
imports = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
keys = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"
lines = import_statement.split("""\n""" )
if len(lines ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
idx = 2 if lines[1].strip() == """[""" else 1
keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lines ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lines[1] = _re_bracket_content.sub(_replace , lines[1] )
else:
keys = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1] ) + """, """.join([f"""\"{k}\"""" for k in sort_objects(keys )] )
return "\n".join(lines )
else:
# Finally we have to deal with imports fitting on one line
return _re_bracket_content.sub(_replace , import_statement )
def sort_imports(file , check_only=True ):
"""simple docstring"""
with open(file , encoding="""utf-8""" ) as f:
code = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
main_blocks = split_code_in_indented_blocks(
code , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(main_blocks ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
block = main_blocks[block_idx]
block_lines = block.split("""\n""" )
# Get to the start of the imports.
line_idx = 0
while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
line_idx = len(block_lines )
else:
line_idx += 1
if line_idx >= len(block_lines ):
continue
# Ignore beginning and last line: they don't contain anything.
internal_block_code = """\n""".join(block_lines[line_idx:-1] )
indent = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
count = 0
reordered_blocks = []
for i in range(len(keys ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(block )
count += 1
# And we put our main block back together with its first and last line.
main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(main_blocks ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(file , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
"""simple docstring"""
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
result = sort_imports(os.path.join(root , """__init__.py""" ) , check_only=check_only )
if result:
failures = [os.path.join(root , """__init__.py""" )]
if len(failures ) > 0:
raise ValueError(f"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 87 |
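# A standalone sketch of the three-bucket ordering that sort_objects implements
# above: constants first, classes second, functions last, each group sorted
# case-insensitively with underscores ignored. The input names are illustrative.
def sort_names(names ):
    key = lambda name: name.lower().replace("_" , "" )
    constants = [n for n in names if n.isupper()]
    classes = [n for n in names if n[0].isupper() and not n.isupper()]
    functions = [n for n in names if not n[0].isupper()]
    return sorted(constants , key=key ) + sorted(classes , key=key ) + sorted(functions , key=key )
print(sort_names(["load_model", "MODEL_LIST", "AutoModel", "cached_path", "CONFIG_NAME"] ) )
# ['CONFIG_NAME', 'MODEL_LIST', 'AutoModel', 'cached_path', 'load_model']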
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0 , __lowerCAmelCase = False , __lowerCAmelCase = 1 , ):
super().__init__()
self.conv = nn.Conv2d(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=__lowerCAmelCase , padding=__lowerCAmelCase , bias=__lowerCAmelCase , dilation=__lowerCAmelCase , )
self.batch_norm = nn.BatchNorm2d(__lowerCAmelCase )
self.activation = nn.ReLU()
def _lowerCamelCase ( self , __lowerCAmelCase ):
output = self.conv(__lowerCAmelCase )
output = self.batch_norm(output )
output = self.activation(output )
return output
class UperNetPyramidPoolingBlock( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(__lowerCAmelCase ),
UperNetConvModule(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = input
for layer in self.layers:
UpperCamelCase__ = layer(__lowerCAmelCase )
return hidden_state
class UperNetPyramidPoolingModule( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
UpperCamelCase__ = pool_scales
UpperCamelCase__ = align_corners
UpperCamelCase__ = in_channels
UpperCamelCase__ = channels
UpperCamelCase__ = []
for i, pool_scale in enumerate(__lowerCAmelCase ):
UpperCamelCase__ = UperNetPyramidPoolingBlock(pool_scale=__lowerCAmelCase , in_channels=__lowerCAmelCase , channels=__lowerCAmelCase )
self.blocks.append(__lowerCAmelCase )
self.add_module(str(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = []
for ppm in self.blocks:
UpperCamelCase__ = ppm(__lowerCAmelCase )
UpperCamelCase__ = nn.functional.interpolate(
__lowerCAmelCase , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(__lowerCAmelCase )
return ppm_outs
class UperNetHead( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
UpperCamelCase__ = config
UpperCamelCase__ = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCamelCase__ = in_channels
UpperCamelCase__ = config.hidden_size
UpperCamelCase__ = False
self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCamelCase__ = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCamelCase__ = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCamelCase__ = nn.ModuleList()
UpperCamelCase__ = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCamelCase__ = UperNetConvModule(__lowerCAmelCase , self.channels , kernel_size=1 )
UpperCamelCase__ = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__lowerCAmelCase )
self.fpn_convs.append(__lowerCAmelCase )
UpperCamelCase__ = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _lowerCamelCase ( self ):
self.apply(self._init_weights )
def _lowerCamelCase ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = inputs[-1]
UpperCamelCase__ = [x]
psp_outs.extend(self.psp_modules(__lowerCAmelCase ) )
UpperCamelCase__ = torch.cat(__lowerCAmelCase , dim=1 )
UpperCamelCase__ = self.bottleneck(__lowerCAmelCase )
return output
def _lowerCamelCase ( self , __lowerCAmelCase ):
# build laterals
UpperCamelCase__ = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__lowerCAmelCase ) )
# build top-down path
UpperCamelCase__ = len(__lowerCAmelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase__ = laterals[i - 1].shape[2:]
UpperCamelCase__ = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__lowerCAmelCase , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
UpperCamelCase__ = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase__ = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
UpperCamelCase__ = torch.cat(__lowerCAmelCase , dim=1 )
UpperCamelCase__ = self.fpn_bottleneck(__lowerCAmelCase )
UpperCamelCase__ = self.classifier(__lowerCAmelCase )
return output
class UperNetFCNHead( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = 2 , __lowerCAmelCase = 3 , __lowerCAmelCase = 1 ):
super().__init__()
UpperCamelCase__ = config
UpperCamelCase__ = config.auxiliary_in_channels
UpperCamelCase__ = config.auxiliary_channels
UpperCamelCase__ = config.auxiliary_num_convs
UpperCamelCase__ = config.auxiliary_concat_input
UpperCamelCase__ = in_index
UpperCamelCase__ = (kernel_size // 2) * dilation
UpperCamelCase__ = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__lowerCAmelCase , padding=__lowerCAmelCase , dilation=__lowerCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__lowerCAmelCase , padding=__lowerCAmelCase , dilation=__lowerCAmelCase ) )
if self.num_convs == 0:
UpperCamelCase__ = nn.Identity()
else:
UpperCamelCase__ = nn.Sequential(*__lowerCAmelCase )
if self.concat_input:
UpperCamelCase__ = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__lowerCAmelCase , padding=kernel_size // 2 )
self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def _lowerCamelCase ( self ):
self.apply(self._init_weights )
def _lowerCamelCase ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowerCamelCase ( self , __lowerCAmelCase ):
# just take the relevant feature maps
UpperCamelCase__ = encoder_hidden_states[self.in_index]
UpperCamelCase__ = self.convs(__lowerCAmelCase )
if self.concat_input:
UpperCamelCase__ = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCamelCase__ = self.classifier(__lowerCAmelCase )
return output
class UperNetPreTrainedModel( PreTrainedModel ):
config_class = UperNetConfig
main_input_name = """pixel_values"""
supports_gradient_checkpointing = True
def _lowerCamelCase ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowerCamelCase ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=False ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = value
UPERNET_START_DOCSTRING = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation( UperNetPreTrainedModel ):
def __init__( self , __lowerCAmelCase ):
super().__init__(__lowerCAmelCase )
UpperCamelCase__ = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCamelCase__ = UperNetHead(__lowerCAmelCase , in_channels=self.backbone.channels )
UpperCamelCase__ = UperNetFCNHead(__lowerCAmelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
def _lowerCamelCase ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCamelCase__ = self.backbone.forward_with_filtered_kwargs(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , output_attentions=__lowerCAmelCase )
UpperCamelCase__ = outputs.feature_maps
UpperCamelCase__ = self.decode_head(__lowerCAmelCase )
UpperCamelCase__ = nn.functional.interpolate(__lowerCAmelCase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=__lowerCAmelCase )
UpperCamelCase__ = None
if self.auxiliary_head is not None:
UpperCamelCase__ = self.auxiliary_head(__lowerCAmelCase )
UpperCamelCase__ = nn.functional.interpolate(
__lowerCAmelCase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=__lowerCAmelCase )
UpperCamelCase__ = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
UpperCamelCase__ = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCamelCase__ = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCamelCase__ = (logits,) + outputs[1:]
else:
UpperCamelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 87 | 1 |
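# A minimal sketch of the pyramid pooling idea behind UperNetPyramidPoolingModule
# above: pool the feature map to several grid sizes, project each with a 1x1
# conv, upsample back, and concatenate. Channel counts and pool scales here are
# illustrative assumptions.
import torch
from torch import nn
x = torch.randn(1 , 16 , 32 , 32 )
ppm_outs = [x]
for scale in (1, 2, 3, 6):
    branch = nn.Sequential(nn.AdaptiveAvgPool2d(scale ) , nn.Conv2d(16 , 4 , kernel_size=1 ) )
    out = nn.functional.interpolate(branch(x ) , size=x.shape[2:] , mode="bilinear" , align_corners=False )
    ppm_outs.append(out )
print(torch.cat(ppm_outs , dim=1 ).shape ) # torch.Size([1, 32, 32, 32])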
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
def __init__( self : Optional[Any] , img : List[Any] , dst_width : int , dst_height : int ) -> Dict:
if dst_width <= 0 or dst_height <= 0:
raise ValueError('''Destination width/height should be > 0''' )
self.img = img
self.src_w = img.shape[1]
self.src_h = img.shape[0]
self.dst_w = dst_width
self.dst_h = dst_height
self.ratio_x = self.src_w / self.dst_w
self.ratio_y = self.src_h / self.dst_h
self.output = (
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 2_55
)
def process( self : List[Any] ) -> None:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
def get_x( self : Optional[Any] , x : int ) -> int:
return int(self.ratio_x * x )
def get_y( self : Any , y : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
dst_w, dst_h = 800, 600
im = imread('''image_data/lena.jpg''', 1)
n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 33 |
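# A vectorized NumPy sketch of the same nearest-neighbour mapping as the class
# above, avoiding the per-pixel Python loop; the 4x4 toy input is illustrative.
import numpy as np
def nearest_neighbour_resize(img , dst_w , dst_h ):
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w ) * (src_w / dst_w )).astype(int )
    ys = (np.arange(dst_h ) * (src_h / dst_h )).astype(int )
    return img[ys[:, None], xs[None, :]]
img = np.arange(4 * 4 * 3 , dtype=np.uint8 ).reshape(4 , 4 , 3 )
print(nearest_neighbour_resize(img , 8 , 8 ).shape ) # (8, 8, 3)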
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
def __init__( self : List[Any] , parent : Any , batch_size : Tuple=7 , num_channels : Tuple=3 , min_resolution : Optional[Any]=30 , max_resolution : List[Any]=4_00 , do_resize : Tuple=True , size : Dict=None , do_normalize : List[str]=True , image_mean : Optional[int]=[0.5, 0.5, 0.5] , image_std : Tuple=[0.5, 0.5, 0.5] , do_rescale : List[str]=True , rescale_factor : List[Any]=1 / 2_55 , do_pad : Union[str, Any]=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
def A ( self : Any ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values( self : Optional[Any] , image_inputs : int , batched : int=False ) -> Tuple:
if not batched:
image = image_inputs[0]
if isinstance(image , Image.Image ):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size['''shortest_edge'''] * h / w )
expected_width = self.size['''shortest_edge''']
elif w > h:
expected_height = self.size['''shortest_edge''']
expected_width = int(self.size['''shortest_edge'''] * w / h )
else:
expected_height = self.size['''shortest_edge''']
expected_width = self.size['''shortest_edge''']
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = YolosImageProcessor if is_vision_available() else None
def setUp( self : Optional[int] ) -> None:
self.image_processor_tester = YolosImageProcessingTester(self )
@property
def image_processor_dict( self : str ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Optional[int] ) -> List[str]:
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def A ( self : Dict ) -> Tuple:
lowercase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A )
lowercase_ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A )
def A ( self : Optional[int] ) -> Tuple:
pass
def A ( self : Tuple ) -> int:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : str ) -> Any:
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[Any]:
# Initialize image_processings
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def A ( self : str ) -> List[Any]:
# prepare image and target
lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase_ : List[Any] = json.loads(f.read() )
lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def A ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
lowercase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase_ : str = json.loads(f.read() )
lowercase_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
lowercase_ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase_ : int = YolosImageProcessor(format='''coco_panoptic''' )
lowercase_ : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowercase_ : Dict = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 33 | 1 |
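# A standalone sketch of the shortest-edge resize rule that get_expected_values
# above mirrors: the shorter image side is scaled to size["shortest_edge"] and
# the longer side keeps the aspect ratio. The input resolution is illustrative.
def shortest_edge_size(h , w , shortest_edge=18 ):
    if w < h:
        return int(shortest_edge * h / w ), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h )
    return shortest_edge, shortest_edge
print(shortest_edge_size(480 , 640 ) ) # (18, 24)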
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class GPTBigCodeConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''gpt_bigcode'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
"""simple docstring"""
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
self.multi_query = multi_query
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 229 |
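# A toy sketch of what an attribute_map like the one above provides: canonical
# names (hidden_size, num_hidden_layers) alias model-specific fields (n_embd,
# n_layer). This is a simplified stand-in, not the transformers
# PretrainedConfig implementation.
class ToyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}
    def __init__(self , n_embd=768 , n_layer=12 ):
        self.n_embd = n_embd
        self.n_layer = n_layer
    def __getattr__(self , name ):
        mapped = type(self ).attribute_map.get(name )
        if mapped is not None:
            return getattr(self , mapped )
        raise AttributeError(name )
cfg = ToyConfig()
print(cfg.hidden_size , cfg.num_hidden_layers ) # 768 12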
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = 13
_lowerCAmelCase = 7
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = 99
_lowerCAmelCase = 384
_lowerCAmelCase = 2
_lowerCAmelCase = 4
_lowerCAmelCase = 37
_lowerCAmelCase = """gelu"""
_lowerCAmelCase = 0.1
_lowerCAmelCase = 0.1
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 2
_lowerCAmelCase = 0.02
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = 2
_lowerCAmelCase = 9
_lowerCAmelCase = 1
_lowerCAmelCase = None
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel(config=_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(_lowercase )
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertForMaskedLM(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFConvBertForSequenceClassification(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFConvBertForMultipleChoice(config=_lowercase )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFConvBertForTokenClassification(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertForQuestionAnswering(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase : Optional[Any] = False
_lowercase : Dict = False
_lowercase : Any = False
def setUp( self ):
"""simple docstring"""
self.model_tester = TFConvBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = True
if hasattr(_lowercase , """use_cache""" ):
_lowerCAmelCase = True
_lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
for model_class in self.all_model_classes:
_lowerCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = len(model(_lowercase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase , saved_model=_lowercase )
_lowerCAmelCase = os.path.join(_lowercase , """saved_model""" , """1""" )
_lowerCAmelCase = tf.keras.models.load_model(_lowercase )
_lowerCAmelCase = model(_lowercase )
if self.is_encoder_decoder:
_lowerCAmelCase = outputs["""encoder_hidden_states"""]
_lowerCAmelCase = outputs["""encoder_attentions"""]
else:
_lowerCAmelCase = outputs["""hidden_states"""]
_lowerCAmelCase = outputs["""attentions"""]
self.assertEqual(len(_lowercase ) , _lowercase )
_lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
_lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
def check_decoder_attentions_output(_lowercase ):
_lowerCAmelCase = len(_lowercase )
self.assertEqual(out_len % 2 , 0 )
_lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_lowercase ):
_lowerCAmelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
_lowerCAmelCase = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(_lowercase )[0]
_lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , _lowercase )
_lowerCAmelCase = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1e-4 )
| 229 | 1 |
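# The attention-shape assertions above divide num_attention_heads by 2 because
# ConvBert swaps half of the self-attention heads for a convolution branch
# (head_ratio defaults to 2 in ConvBertConfig). A shape-only sketch with toy
# numbers, as an illustration rather than the model's actual bookkeeping:
num_attention_heads = 4
head_ratio = 2
batch_size, seq_length = 2, 7
self_attn_heads = num_attention_heads // head_ratio
print((batch_size, self_attn_heads, seq_length, seq_length) ) # (2, 2, 7, 7)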
'''simple docstring'''
import math
import unittest
def is_prime(number ):
"""simple docstring"""
assert isinstance(number , int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : str ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,)
self.assertFalse(
is_prime(1 ) ,'''One only has 1 positive factor, primes must have exactly two.''' ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 104 |
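# A tiny demonstration of why the trial division above only tests 6k - 1 and
# 6k + 1: every integer is 6k + r with r in 0..5, and r in {0, 2, 3, 4} yields a
# number divisible by 2 or 3, so any prime > 3 must have r in {1, 5}.
candidates = [n for n in range(5 , 50 ) if n % 2 != 0 and n % 3 != 0]
print(all(n % 6 in (1, 5) for n in candidates ) ) # True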
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
"""simple docstring"""
def __init__( self : str ,lowercase__ : Tuple ,lowercase__ : Dict=1_3 ,lowercase__ : List[str]=3_0 ,lowercase__ : Tuple=2 ,lowercase__ : Optional[int]=3 ,lowercase__ : List[str]=True ,lowercase__ : Tuple=True ,lowercase__ : int=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : Tuple=4 ,lowercase__ : Any=3_7 ,lowercase__ : Any="gelu" ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : str=1_0 ,lowercase__ : Optional[int]=0.0_2 ,lowercase__ : Union[str, Any]=3 ,lowercase__ : Optional[int]=0.6 ,lowercase__ : List[Any]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : List[str] ):
__lowercase = ViTMAEModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[Any] ,lowercase__ : int ,lowercase__ : Optional[Any] ):
__lowercase = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
__lowercase = (self.image_size // self.patch_size) ** 2
__lowercase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowercase = 1
__lowercase = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(lowercase__ )
__lowercase = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=3_7)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 1_9_6, 7_6_8))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 104 | 1 |
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
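# Quick sanity check (illustration, not part of the original snippet):
#     hexagonal_numbers(5) -> [0, 1, 6, 15, 28]
# since h(n) = n * (2n - 1) for n = 0..4, and consecutive terms differ by 4n - 3.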
| 73 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCAmelCase_( lowercase_ : List[str]="" ) -> str:
_lowerCamelCase = tempfile.mkdtemp()
return os.path.join(lowercase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
        _lowerCamelCase = torch.rand(1_2 , dtype=torch.float32 ) - 0.5
_lowerCamelCase = AgentAudio(lowerCamelCase__ )
_lowerCamelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_lowerCamelCase , _lowerCamelCase = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , torch.tensor(lowerCamelCase__ ) , atol=1e-4 ) )
def snake_case__ ( self ):
        _lowerCamelCase = torch.rand(1_2 , dtype=torch.float32 ) - 0.5
_lowerCamelCase = get_new_path(suffix='''.wav''' )
sf.write(lowerCamelCase__ , lowerCamelCase__ , 1_6_0_0_0 )
_lowerCamelCase = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , lowerCamelCase__ )
@require_vision
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
_lowerCamelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = '''Hey!'''
_lowerCamelCase = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , agent_type.to_string() )
self.assertEqual(lowerCamelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["ConvNextFeatureExtractor"]
_lowerCamelCase =["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
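# Usage sketch (illustration of the assumed `_LazyModule` behavior, not part of this file):
# the heavy submodules registered above are only imported on first attribute access, e.g.
#     import transformers.models.convnext as convnext   # cheap, no torch import yet
#     model_cls = convnext.ConvNextModel                 # triggers import of modeling_convnext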
| 334 |
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    """simple docstring"""
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
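# Worked examples (illustration, not part of the original snippet):
#     median_of_two_arrays([1, 3], [2])    -> 2    (odd total length: middle element)
#     median_of_two_arrays([1, 2], [3, 4]) -> 2.5  (even total length: mean of the two middle elements)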
| 334 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "efficientformer"
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , downsamples : List[bool] = [True, True, True, True] , dim : int = 4_4_8 , key_dim : int = 3_2 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 1_6 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-1_2 , image_size : int = 2_2_4 , batch_norm_eps : float = 1e-0_5 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
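# Minimal usage sketch (assumption based on the standard `PretrainedConfig` API,
# not part of the original file):
#     config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
#     config.save_pretrained("./efficientformer-l1")  # writes config.json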
| 176 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
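# Worked trace (illustration, not part of the original snippet): for "ab" vs "ba",
#     i = 0: count['a'] += 1, count['b'] -= 1  ->  {'a': 1, 'b': -1}
#     i = 1: count['b'] += 1, count['a'] -= 1  ->  {'a': 0, 'b': 0}
# all counts are zero, so the strings are anagrams.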
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 176 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
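# Illustration (not part of the original file): bytes_to_unicode() remaps the 68
# "problematic" bytes (whitespace/control) to printable code points above 255, e.g.
#     bytes_to_unicode()[ord(" ")] == "Ġ"
# which is why a leading space shows up as "Ġ" in byte-level BPE tokens.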
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
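# Minimal usage sketch (assumption — the standard slow-tokenizer API; the vocab/merges
# paths below are placeholders, not part of the original file):
#     tokenizer = BartTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokenizer.tokenize("Hello world")   # byte-level BPE pieces, e.g. ["Hello", "Ġworld"]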
| 123 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """simple docstring"""
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """simple docstring"""
    _active_patches = []
    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.')
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
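# Minimal usage sketch (assumption, not part of the original module; `my_module` is
# a hypothetical module that does `import os` at its top level):
#     def mock_join(*parts):
#         return "/".join(parts)
#     with patch_submodule(my_module, "os.path.join", mock_join):
#         my_module.os.path.join("a", "b")   # -> "a/b", via the patched attribute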
| 123 | 1 |
'''simple docstring'''
import argparse
import os
import re

PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """simple docstring"""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """simple docstring"""
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks


def ignore_underscore(key):
    """simple docstring"""
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner


def sort_objects(objects, key=None):
    """simple docstring"""
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """simple docstring"""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """simple docstring"""
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
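# Quick illustration of the ordering rule (not part of the original script):
#     sort_objects(["zeta_fn", "Alpha", "BETA", "_gamma"])
#     -> ["BETA", "Alpha", "_gamma", "zeta_fn"]
# constants first, then classes, then functions, with underscores ignored
# for the alphabetical comparison.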
| 25 |
'''simple docstring'''
import argparse
import os
import re

PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """simple docstring"""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """simple docstring"""
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks


def ignore_underscore(key):
    """simple docstring"""
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner


def sort_objects(objects, key=None):
    """simple docstring"""
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """simple docstring"""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """simple docstring"""
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def snake_case ( self ):
"""simple docstring"""
snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__UpperCamelCase )
snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCamelCase )
snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCamelCase ) > 0.5
).float()
snake_case = (torch.rand((self.batch_size, self.num_labels) , device=__UpperCamelCase ) > 0.5).long()
snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case ( self ):
"""simple docstring"""
snake_case = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
snake_case = self.num_queries
snake_case = self.num_labels
snake_case = [1, 1, 1, 1]
snake_case = self.num_channels
snake_case = 64
snake_case = 1_28
snake_case = self.hidden_dim
snake_case = self.hidden_dim
snake_case = self.hidden_dim
return config
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
snake_case = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = output.encoder_hidden_states
snake_case = output.pixel_decoder_hidden_states
snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCamelCase ) , config.decoder_layers )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
with torch.no_grad():
snake_case = MaskaFormerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case = model(pixel_values=__UpperCamelCase , pixel_mask=__UpperCamelCase )
snake_case = model(__UpperCamelCase , output_hidden_states=__UpperCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCamelCase , __UpperCamelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = MaskaFormerForUniversalSegmentation(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
def comm_check_on_output(lowerCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case = model(pixel_values=__UpperCamelCase , pixel_mask=__UpperCamelCase )
snake_case = model(__UpperCamelCase )
comm_check_on_output(__UpperCamelCase )
snake_case = model(
pixel_values=__UpperCamelCase , pixel_mask=__UpperCamelCase , mask_labels=__UpperCamelCase , class_labels=__UpperCamelCase )
comm_check_on_output(__UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__UpperCamelCase , **__UpperCamelCase , output_hidden_states=__UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__UpperCamelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__UpperCamelCase )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
snake_case = MaskaFormerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = (self.model_tester.min_size,) * 2
snake_case = {
'pixel_values': torch.randn((2, 3, *size) , device=__UpperCamelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=__UpperCamelCase ),
'class_labels': torch.zeros(2 , 10 , device=__UpperCamelCase ).long(),
}
snake_case = self.model_tester.get_config()
snake_case = MaskaFormerForUniversalSegmentation(__UpperCamelCase ).to(__UpperCamelCase )
snake_case = model(**__UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__UpperCamelCase , **__UpperCamelCase , output_hidden_states=__UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__UpperCamelCase ).to(__UpperCamelCase )
snake_case = model(**__UpperCamelCase , output_attentions=__UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
snake_case = self.all_model_classes[1]
snake_case = self.model_tester.prepare_config_and_inputs()
snake_case = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
snake_case = model(__UpperCamelCase , mask_labels=__UpperCamelCase , class_labels=__UpperCamelCase ).loss
loss.backward()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.all_model_classes[1]
snake_case = self.model_tester.prepare_config_and_inputs()
snake_case = True
snake_case = True
snake_case = model_class(__UpperCamelCase ).to(__UpperCamelCase )
model.train()
snake_case = model(__UpperCamelCase , mask_labels=__UpperCamelCase , class_labels=__UpperCamelCase )
snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
SCREAMING_SNAKE_CASE__ = 1E-4
def lowerCAmelCase__ ( ) -> Tuple:
"""simple docstring"""
snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def snake_case ( self ):
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def snake_case ( self ):
"""simple docstring"""
snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__UpperCamelCase )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
snake_case = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCamelCase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
snake_case = model(**__UpperCamelCase )
snake_case = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(__UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
snake_case = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(__UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
snake_case = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(__UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def snake_case ( self ):
"""simple docstring"""
snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCamelCase ).eval()
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
snake_case = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCamelCase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
snake_case = model(**__UpperCamelCase )
# masks_queries_logits
snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
snake_case = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
snake_case = torch.tensor(__UpperCamelCase ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
# class_queries_logits
snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
snake_case = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def snake_case ( self ):
"""simple docstring"""
snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCamelCase ).eval()
snake_case = self.default_image_processor
snake_case = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='pt' , )
snake_case = inputs['pixel_values'].to(__UpperCamelCase )
snake_case = [el.to(__UpperCamelCase ) for el in inputs['mask_labels']]
snake_case = [el.to(__UpperCamelCase ) for el in inputs['class_labels']]
with torch.no_grad():
snake_case = model(**__UpperCamelCase )
self.assertTrue(outputs.loss is not None )
| 150 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['pixel_values']
def __init__( self ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = 0.9 ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = 1 / 255 ,__UpperCamelCase = True ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
lowercase_ : Optional[int] = size if size is not None else {'shortest_edge': 224}
lowercase_ : Union[str, Any] = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase_ : Optional[int] = get_size_dict(__UpperCamelCase ,param_name='crop_size' )
lowercase_ : List[str] = do_resize
lowercase_ : List[Any] = size
lowercase_ : int = crop_pct
lowercase_ : Dict = resample
lowercase_ : List[str] = do_center_crop
lowercase_ : Union[str, Any] = crop_size
lowercase_ : List[Any] = do_rescale
lowercase_ : Tuple = rescale_factor
lowercase_ : Tuple = do_normalize
lowercase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase_ : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
lowercase_ : Any = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
lowercase_ : Union[str, Any] = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowercase_ : Tuple = int(size['height'] / crop_pct )
else:
lowercase_ : Dict = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(__UpperCamelCase ) )
lowercase_ : int = get_resize_output_image_size(__UpperCamelCase ,size=__UpperCamelCase ,default_to_square=__UpperCamelCase )
else:
if "shortest_edge" in size:
lowercase_ : Optional[int] = get_resize_output_image_size(__UpperCamelCase ,size=size['shortest_edge'] ,default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
lowercase_ : Dict = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(__UpperCamelCase ) )
return resize(__UpperCamelCase ,size=__UpperCamelCase ,resample=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
lowercase_ : List[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCamelCase ,size=(size['height'], size['width']) ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
return rescale(__UpperCamelCase ,scale=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
return normalize(__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> PIL.Image.Image:
'''simple docstring'''
lowercase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ : List[str] = resample if resample is not None else self.resample
lowercase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : str = image_mean if image_mean is not None else self.image_mean
lowercase_ : Tuple = image_std if image_std is not None else self.image_std
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Tuple = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : List[str] = get_size_dict(__UpperCamelCase ,param_name='crop_size' )
lowercase_ : str = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase_ : Optional[Any] = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
lowercase_ : str = [self.resize(image=__UpperCamelCase ,size=__UpperCamelCase ,crop_pct=__UpperCamelCase ,resample=__UpperCamelCase ) for image in images]
if do_center_crop:
lowercase_ : str = [self.center_crop(image=__UpperCamelCase ,size=__UpperCamelCase ) for image in images]
if do_rescale:
lowercase_ : Any = [self.rescale(image=__UpperCamelCase ,scale=__UpperCamelCase ) for image in images]
if do_normalize:
lowercase_ : int = [self.normalize(image=__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ) for image in images]
lowercase_ : Dict = [to_channel_dimension_format(__UpperCamelCase ,__UpperCamelCase ) for image in images]
lowercase_ : Any = {'pixel_values': images}
return BatchFeature(data=__UpperCamelCase ,tensor_type=__UpperCamelCase )
| 213 | 0 |
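The crop_pct logic in the image processor above first enlarges the image so that a later center crop keeps roughly crop_pct of it. A minimal standalone sketch of that resize-then-crop step (function name and defaults are illustrative, assuming Pillow >= 9.1):

from PIL import Image

def resize_then_center_crop(image: Image.Image, crop_size: int = 224, crop_pct: float = 0.9) -> Image.Image:
    # resize so the shorter side equals crop_size / crop_pct, then take a
    # centered crop_size x crop_size crop ("percent crop")
    scale_size = int(crop_size / crop_pct)  # 224 / 0.9 -> 248
    ratio = scale_size / min(image.size)
    resized = image.resize(
        (round(image.width * ratio), round(image.height * ratio)),
        resample=Image.Resampling.BICUBIC,  # Pillow >= 9.1
    )
    left = (resized.width - crop_size) // 2
    top = (resized.height - crop_size) // 2
    return resized.crop((left, top, left + crop_size, top + crop_size))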
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all contiguous subsequences of ``nums``."""
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty' )
    ans = nums[0]
    for i in range(1 , len(nums) ):
        num = nums[i]
        # either extend the best subsequence ending here, or restart at num
        ans = max(ans , ans + num , num )
    return ans
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 109 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = MvpTokenizer
UpperCAmelCase__ : List[Any] = MvpTokenizerFast
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Tuple = filter_roberta_detectors
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
super().setUp()
a__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
a__ = dict(zip(__snake_case ,range(len(__snake_case ) ) ) )
a__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a__ = {'unk_token': '<unk>'}
a__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def lowerCamelCase__( self :Optional[Any] ,**__snake_case :Tuple ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__snake_case )
def lowerCamelCase__( self :Optional[Any] ,**__snake_case :Dict ) -> Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :Union[str, Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def lowerCamelCase__( self :Optional[int] ) -> str:
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def lowerCamelCase__( self :Optional[int] ) -> str:
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(__snake_case ,max_length=len(__snake_case ) ,padding=__snake_case ,return_tensors='pt' )
self.assertIsInstance(__snake_case ,__snake_case )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
a__ = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case ,__snake_case )
# Test that special tokens are reset
@require_torch
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(__snake_case ,padding=__snake_case ,return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' ,__snake_case )
self.assertIn('attention_mask' ,__snake_case )
self.assertNotIn('labels' ,__snake_case )
self.assertNotIn('decoder_attention_mask' ,__snake_case )
@require_torch
def lowerCamelCase__( self :Any ) -> int:
a__ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(text_target=__snake_case ,max_length=32 ,padding='max_length' ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
@require_torch
def lowerCamelCase__( self :int ) -> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(
['I am a small frog' * 10_24, 'I am a small frog'] ,padding=__snake_case ,truncation=__snake_case ,return_tensors='pt' )
self.assertIsInstance(__snake_case ,__snake_case )
self.assertEqual(batch.input_ids.shape ,(2, 10_24) )
@require_torch
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = ['A long paragraph for summarization.']
a__ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(__snake_case ,text_target=__snake_case ,return_tensors='pt' )
a__ = inputs['input_ids']
a__ = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a__ = self.rust_tokenizer_class.from_pretrained(__snake_case ,**__snake_case )
a__ = self.tokenizer_class.from_pretrained(__snake_case ,**__snake_case )
a__ = 'A, <mask> AllenNLP sentence.'
a__ = tokenizer_r.encode_plus(__snake_case ,add_special_tokens=__snake_case ,return_token_type_ids=__snake_case )
a__ = tokenizer_p.encode_plus(__snake_case ,add_special_tokens=__snake_case ,return_token_type_ids=__snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
a__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
a__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__snake_case ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
__snake_case ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 109 | 1 |
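The batches in the tokenizer tests above tokenize sources and targets in a single call via text_target. A minimal sketch, with facebook/bart-base standing in for the MVP checkpoint:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('facebook/bart-base')
batch = tok(
    ['A long paragraph for summarization.'],  # encoder inputs
    text_target=['Summary of the text.'],     # decoder targets, returned as `labels`
    return_tensors='pt',
)
print(batch['input_ids'].shape, batch['labels'].shape)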
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
SCREAMING_SNAKE_CASE__ : Dict =Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return image
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =[]
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =dct.pop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =val
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : str ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE__ : Tuple =state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
SCREAMING_SNAKE_CASE__ : int =state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE__ : str =torch.cat((q_bias, torch.zeros_like(UpperCamelCase__, requires_grad=UpperCamelCase__ ), v_bias) )
SCREAMING_SNAKE_CASE__ : List[Any] =qkv_bias
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =3_6_4 if '''coco''' in model_name else 2_2_4
SCREAMING_SNAKE_CASE__ : int =InstructBlipVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
SCREAMING_SNAKE_CASE__ : str =TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE__ : Any =TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
SCREAMING_SNAKE_CASE__ : str =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''', vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''', vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] =InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
SCREAMING_SNAKE_CASE__ : Tuple =InstructBlipConfig(vision_config=UpperCamelCase__, text_config=UpperCamelCase__, qformer_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Tuple=None, UpperCamelCase__ : Tuple=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoTokenizer.from_pretrained('''bert-base-uncased''', truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''', truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
SCREAMING_SNAKE_CASE__ : int =LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''', truncation_side='''left''', bos_token='''</s>''', unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =get_blipa_config(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =InstructBlipForConditionalGeneration(UpperCamelCase__ ).eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
SCREAMING_SNAKE_CASE__ : Dict ='''cuda:1''' if torch.cuda.is_available() else '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[int] ='''cuda:2''' if torch.cuda.is_available() else '''cpu'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =load_model_and_preprocess(
name=UpperCamelCase__, model_type=UpperCamelCase__, is_eval=UpperCamelCase__, device=UpperCamelCase__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
SCREAMING_SNAKE_CASE__ : Any =original_model.state_dict()
SCREAMING_SNAKE_CASE__ : List[Any] =create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict.pop(UpperCamelCase__ )
if key.startswith('''Qformer.bert''' ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =key.replace('''Qformer.bert''', '''qformer''' )
if "attention.self" in key:
SCREAMING_SNAKE_CASE__ : Dict =key.replace('''self''', '''attention''' )
if "llm_proj" in key:
SCREAMING_SNAKE_CASE__ : Any =key.replace('''llm_proj''', '''language_projection''' )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE__ : Tuple =key.replace('''t5_proj''', '''language_projection''' )
if key.startswith('''llm_model''' ):
SCREAMING_SNAKE_CASE__ : int =key.replace('''llm_model''', '''language_model''' )
if key.startswith('''t5''' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =key.replace('''t5''', '''language''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__, UpperCamelCase__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =load_demo_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''What is unusual about this image?'''
# create processor
SCREAMING_SNAKE_CASE__ : List[Any] =BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size}, image_mean=UpperCamelCase__, image_std=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =InstructBlipProcessor(
image_processor=UpperCamelCase__, tokenizer=UpperCamelCase__, qformer_tokenizer=UpperCamelCase__, )
SCREAMING_SNAKE_CASE__ : List[Any] =processor(images=UpperCamelCase__, text=UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
SCREAMING_SNAKE_CASE__ : int =vis_processors['''eval'''](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ), UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "vicuna" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
SCREAMING_SNAKE_CASE__ : Tuple =hf_model(**UpperCamelCase__ ).logits
else:
SCREAMING_SNAKE_CASE__ : Tuple =original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
SCREAMING_SNAKE_CASE__ : int =tokenizer('''\n''', return_tensors='''pt''' ).input_ids.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -1_0_0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =hf_model(**UpperCamelCase__, labels=UpperCamelCase__ ).logits
print('''First values of original logits:''', original_logits[0, :3, :3] )
print('''First values of HF logits:''', logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
SCREAMING_SNAKE_CASE__ : Tuple =1e-4 if '''vicuna''' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ), UpperCamelCase__, atol=UpperCamelCase__ )
print('''Looks ok!''' )
print('''Generating with original model...''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt}, num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
SCREAMING_SNAKE_CASE__ : Tuple =hf_model.generate(
**UpperCamelCase__, do_sample=UpperCamelCase__, num_beams=5, max_length=2_5_6, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
SCREAMING_SNAKE_CASE__ : List[Any] =2
print('''Original generation:''', UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =processor.batch_decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[text.strip() for text in output_text]
print('''HF generation:''', UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(f"Salesforce/{model_name}" )
hf_model.push_to_hub(f"Salesforce/{model_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
a_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 152 |
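The conversion script above renames checkpoint keys with regular expressions before loading them into the HF model. A reduced sketch of that pattern (the two substitutions shown are a small subset of the rules used above):

import re

import torch

def rename_key(key: str) -> str:
    key = re.sub(r'^Qformer\.bert', 'qformer', key)
    if 'attention.self' in key:
        key = key.replace('self', 'attention')
    return key

state_dict = {'Qformer.bert.encoder.layer.0.attention.self.query.weight': torch.zeros(2, 2)}
state_dict = {rename_key(k): v for k, v in state_dict.items()}
print(list(state_dict))  # ['qformer.encoder.layer.0.attention.attention.query.weight']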
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""input_features""", """attention_mask"""]
def __init__( self : Tuple , __lowercase : Optional[int]=80 , __lowercase : Optional[int]=1_60_00 , __lowercase : str=80 , __lowercase : Dict=0.0 , __lowercase : int=True , __lowercase : List[Any]=True , __lowercase : List[Any]=True , **__lowercase : int , ) -> int:
super().__init__(feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[Any] =do_ceptral_normalize
SCREAMING_SNAKE_CASE__ : List[str] =normalize_means
SCREAMING_SNAKE_CASE__ : Optional[int] =normalize_vars
SCREAMING_SNAKE_CASE__ : Tuple =True
def __magic_name__ ( self : Tuple , __lowercase : np.ndarray , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Any =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.from_numpy(__lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =ta_kaldi.fbank(__lowercase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __magic_name__ ( __lowercase : np.ndarray , __lowercase : int , __lowercase : Optional[bool] = True , __lowercase : Optional[bool] = True , __lowercase : float = 0.0 , ) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
SCREAMING_SNAKE_CASE__ : str =x[:input_length].mean(axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.subtract(__lowercase , __lowercase )
if normalize_vars:
SCREAMING_SNAKE_CASE__ : Tuple =x[:input_length].std(axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.divide(__lowercase , __lowercase )
if input_length < x.shape[0]:
SCREAMING_SNAKE_CASE__ : Tuple =padding_value
# make sure array is in float32
SCREAMING_SNAKE_CASE__ : str =x.astype(np.floataa )
return x
def __magic_name__ ( self : Optional[int] , __lowercase : List[np.ndarray] , __lowercase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
SCREAMING_SNAKE_CASE__ : int =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowercase , __lowercase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowercase , __lowercase )
]
def __call__( self : int , __lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowercase : Union[bool, str, PaddingStrategy] = False , __lowercase : Optional[int] = None , __lowercase : bool = False , __lowercase : Optional[int] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , **__lowercase : str , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE__ : List[str] =isinstance(__lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE__ : str =is_batched_numpy or (
isinstance(__lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[np.asarray(__lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowercase , np.ndarray ):
SCREAMING_SNAKE_CASE__ : int =np.asarray(__lowercase , dtype=np.floataa )
elif isinstance(__lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ : Tuple =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ : List[str] =[raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE__ : Tuple =[self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ : Optional[int] =BatchFeature({'''input_features''': features} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.pad(
__lowercase , padding=__lowercase , max_length=__lowercase , truncation=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ : Optional[Any] =padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __lowercase ):
SCREAMING_SNAKE_CASE__ : Any =[np.asarray(__lowercase , dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE__ : Dict =padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ : str =[np.asarray(__lowercase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE__ : str =(
np.array(__lowercase , dtype=np.intaa )
if self._get_padding_strategies(__lowercase , max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE__ : int =self.normalize(
padded_inputs['''input_features'''] , attention_mask=__lowercase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ : Dict =padded_inputs.convert_to_tensors(__lowercase )
return padded_inputs | 152 | 1 |
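The extractor above applies utterance-level cepstral mean and variance normalization (CMVN) to each feature matrix. A minimal NumPy sketch of the same idea (the epsilon guard is an addition; the code above divides by the raw standard deviation):

import numpy as np

def utterance_cmvn(x: np.ndarray) -> np.ndarray:
    # normalize each mel bin over time (axis 0) to zero mean, unit variance
    x = x - x.mean(axis=0)
    return (x / (x.std(axis=0) + 1e-10)).astype(np.float32)

feats = np.random.randn(120, 80).astype(np.float32)  # (frames, num_mel_bins)
normalized = utterance_cmvn(feats)
print(normalized.mean(axis=0)[:3], normalized.std(axis=0)[:3])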
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    '''Download up to ``max_images`` full-resolution Google Images results for ``query``.'''
    max_images = min(max_images, 5_0)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search', params=params, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    matched_images_data = ''.join(
        re.findall(R'AF_initDataCallback\(([^<]+)\);', str(soup.select('script'))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",', matched_images_data_json, )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]', '', str(matched_google_image_data), )
    matched_google_full_resolution_images = re.findall(
        R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]', removed_matched_google_images_thumbnails, )
    index = 0  # guard against an empty result list
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # the URLs are double-escaped in the page source, so decode twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape')
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f'{path_name}/original_size_img_{index}.jpg')
    return index
if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(F'{image_count} images were downloaded to disk.')
    except IndexError:
        print('''Please provide a search term.''')
        raise
| 352 |
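The downloader installs a global opener so that urlretrieve sends a browser-like User-Agent. That part in isolation:

import urllib.request

opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')]
urllib.request.install_opener(opener)
# every later urllib.request.urlretrieve/urlopen call now sends this header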
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 304 | 0 |
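Both conversion scripts above validate a port by checking that original and converted logits agree within a tolerance. The pattern, reduced to its core (random tensors stand in for real model outputs):

import torch

original_logits = torch.randn(1, 5, 10)
ported_logits = original_logits + 1e-6 * torch.randn_like(original_logits)

assert original_logits.shape == ported_logits.shape
assert torch.allclose(original_logits, ported_logits, atol=1e-4), 'ported model diverged'
print('Looks ok!')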
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized seq2seq model (and its tokenizer) built from ``config_name``."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
| 142 |
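Because the function is exposed through fire.Fire, its arguments map directly to CLI flags, and it can equally be called from Python. A hypothetical invocation (checkpoint name and override are illustrative):

# 't5-small' stands in for any seq2seq checkpoint name;
# d_model=64 shows a config override forwarded through **config_kwargs
model = save_randomly_initialized_version('t5-small', './t5-small-random', d_model=64)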
def actual_power(a: int, b: int) -> int:
    """Compute ``a ** abs(b)`` by recursive squaring (``int(b / 2)`` truncates toward zero, so negative exponents also terminate)."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    """Compute ``a ** b``, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
    print(power(-2, -3))
| 142 | 1 |
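actual_power above recurses twice per level, so it still performs O(b) multiplications. Computing the half power once restores the expected O(log b) behaviour; a sketch for non-negative exponents:

def fast_power(a: int, b: int) -> int:
    """Same result as actual_power for b >= 0, with one recursive call per level."""
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half

print(fast_power(2, 10))  # 1024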
'''Megatron-BERT model configuration'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"
    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 91 |
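A typical way to exercise this config is the standard transformers pattern of building a model from it (a deliberately tiny config, assuming transformers with torch installed):

from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(
    vocab_size=1024, hidden_size=64, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=128,
)
model = MegatronBertModel(config)
print(model.config.model_type)  # megatron-bert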
'''Translation feature types for the datasets library'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
    from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value
        return {k: Value('''string''') for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
        return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).""")
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value
        return {
            "language": Sequence(Value('''string''')),
            "translation": Sequence(Value('''string''')),
        }
| 91 | 1 |
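These feature types are normally consumed through datasets.Features. A small usage sketch of the fixed-language variant (import path per the datasets library):

from datasets import Dataset, Features, Value
from datasets.features import Translation

features = Features({'id': Value('string'), 'translation': Translation(languages=['en', 'fr'])})
ds = Dataset.from_dict(
    {'id': ['0'], 'translation': [{'en': 'the cat', 'fr': 'le chat'}]},
    features=features,
)
print(ds[0]['translation'])  # {'en': 'the cat', 'fr': 'le chat'}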