from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in the `asdict` output so the template serializes with its name
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
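# Usage sketch (not part of the original file), assuming the public import path
# `datasets.tasks`: a dataset whose columns are named "article"/"highlights"
# can be remapped to the canonical "text"/"summary" names via `column_mapping`.
#
#     from datasets.tasks import Summarization
#
#     template = Summarization(text_column="article", summary_column="highlights")
#     assert template.column_mapping == {"article": "text", "highlights": "summary"}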
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
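# A minimal, self-contained sketch (not part of the original file) of what the
# `_LazyModule` indirection above buys: importing the package stays cheap
# because heavy submodules are only imported when one of their attributes is
# first accessed. `_TinyLazyModule` is a hypothetical, simplified stand-in for
# `transformers.utils._LazyModule`.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the owning submodule on first access and cache the attribute.
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value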
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
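# Notes (not part of the original script):
#
# The manual `samples_seen` bookkeeping in the evaluation loop can be replaced
# by `Accelerator.gather_for_metrics`, which truncates the duplicated samples
# of the last batch automatically:
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)
#
# Assuming the script is saved as `multi_process_metrics.py`, it can be
# launched distributed with, e.g.:
#
#     accelerate launch --num_processes 2 multi_process_metrics.py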
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the model is downloaded before the timed test below."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # flag name is an assumption; only the `True` value survived in the source

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
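# Usage sketch (not part of the original file): "BAAI/AltCLIP" is assumed to be
# a compatible checkpoint on the Hugging Face Hub, and "cat.png" a local image.
#
#     from PIL import Image
#     from transformers import AltCLIPProcessor
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#     # inputs combines tokenizer outputs (input_ids, attention_mask) with pixel_values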
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour upsampling by a factor of 2, then a 3x3 conv.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 conv to project the residual when channel counts differ
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the time embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
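# A minimal usage sketch (not part of the original file); shapes are NHWC as
# the modules above assume, and the sizes here are arbitrary examples.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time embedding
    params = block.init(rng, hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    print(out.shape)  # (1, 16, 16, 64)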
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( UpperCAmelCase_ ):
UpperCamelCase_ : Optional[Any] = (EulerDiscreteScheduler,)
UpperCamelCase_ : Union[str, Any] = 10
def _SCREAMING_SNAKE_CASE ( self : str , **snake_case__ : Tuple ):
lowerCAmelCase__ = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**a__ )
return config
def _SCREAMING_SNAKE_CASE ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a__ , beta_end=a__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCAmelCase__ = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase__ = sample.to(a__ )
for t in scheduler.timesteps:
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**a__ , use_karras_sigmas=a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase__ = sample.to(a__ )
for t in scheduler.timesteps:
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
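# A minimal sketch (not from the original test file) of the sampling loop the
# tests above exercise: scale the model input, predict, then step. The "model"
# here is a toy stand-in for a real denoiser network.
if __name__ == "__main__":
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = 0.1 * model_input  # stand-in for the denoiser's prediction
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    print(sample.abs().mean().item())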
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
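# A minimal usage sketch (not part of the original tests); "facebook/sam-vit-base"
# is assumed to be a smaller SAM checkpoint compatible with the same pipeline.
if __name__ == "__main__":
    segmenter = pipeline("mask-generation", model="facebook/sam-vit-base")
    result = segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64)
    print(len(result["masks"]), "masks; top score:", result["scores"][0])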
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
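    # Hypothetical example invocation (script and file names are illustrative; the
    # flags match the parser defined above):
    #
    #   python convert_pytorch_checkpoint_to_tf2.py \
    #       --model_type bert \
    #       --pytorch_checkpoint_path ./bert-pytorch_model.bin \
    #       --config_file ./bert-config.json \
    #       --tf_dump_path ./tf_models \
    #       --compare_with_pt_model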
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize a single image, a single video or a batch of videos to a batch of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
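# Quick sketch of the three accepted layouts (assuming PIL is available); each call
# normalizes its input to a batch of videos, i.e. List[List[ImageInput]]:
#
#   frame = PIL.Image.new("RGB", (32, 32))
#   make_batched(frame)               # single image    -> [[frame]]
#   make_batched([frame, frame])      # single video    -> [[frame, frame]]
#   make_batched([[frame], [frame]])  # batch of videos -> returned unchanged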
class VivitImageProcessor(BaseImageProcessor):
    """Constructs a video image processor: resize, center-crop, rescale (with an
    optional offset) and normalize batches of video frames."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
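# Hedged usage sketch (the class name above is a reconstruction; shapes assume the
# default size/crop settings):
#
#   import PIL.Image
#   processor = VivitImageProcessor()
#   video = [PIL.Image.new("RGB", (320, 240)) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected (1, 8, 3, 224, 224)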
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
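# A minimal sketch of find_executable_batch_size outside the tests: the decorated
# function is retried with a halved batch size whenever it raises an OOM-style error.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        # Pretend everything above 16 samples per batch runs out of memory.
        if batch_size > 16:
            raise_fake_out_of_memory()
        return batch_size

    print("Largest batch size that fit:", train())  # expected: 16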
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition model: scores each support token as an
    entity start/end for a given query using BERT embeddings and attention."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
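# Hedged usage sketch. Building W_query / W_supports requires the companion FSNER
# tokenizer utilities, which are not shown here; the dict keys below are the ones the
# forward pass above consumes, everything else is an assumption:
#
#   model = FSNERModel()
#   # W_query: BERT-tokenized query sentences (input_ids, attention_mask, ...)
#   # W_supports: BERT-tokenized support examples, plus "sizes", "start_token_id"
#   #             and "end_token_id" entries, exactly as dissected in forward().
#   p_starts, p_ends = model(W_query, W_supports)
#   # p_starts[i][j]: probability that support token j marks an entity start for query i.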
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
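# Hedged end-to-end sketch mirroring the integration test above (needs a CUDA GPU and
# downloads the checkpoint, so it is left as a comment):
#
#   pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   pipe.enable_attention_slicing()
#   image = pipe("anime turtle", num_inference_steps=20).images[0]  # PIL.Image, 768x768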
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
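# A bare denoising loop distilled from the tests above; the zero "model output" is a
# stand-in for a real noise-prediction network, so the result is only illustrative.
if __name__ == "__main__":
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)

    generator = torch.manual_seed(0)
    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample

    print("final sample mean:", sample.mean().item())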
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
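# Hedged usage sketch for the checkpoint exercised above (downloads weights; the
# article text is a placeholder):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
#   model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
#   inputs = tokenizer("a very long article ...", return_tensors="tf")
#   summary_ids = model.generate(inputs["input_ids"], max_length=64)
#   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))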
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """Arguments controlling which csv file is plotted and how."""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
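# Expected CSV layout (column names taken from the reader above) and a hypothetical
# invocation; the script and file names are illustrative:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1251
#   bert-base-uncased,8,512,4302
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png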
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
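# Hedged usage sketch of the character-level tokenizer under test (the checkpoint
# name is an assumption, not pinned by this file):
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tokenizer("tester")["input_ids"]
#   print(tokenizer.decode(ids))  # expected: "tester"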
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy, rename and verify the original MobileViTV2 weights, then save them in HF format."""
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
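# Illustrative sketch (not part of the original script): the key-renaming mechanism used
# above boils down to popping a tensor from the state dict and re-inserting it under the
# new name. `_demo_rename_key` is a hypothetical stand-in for the script's helper.
def _demo_rename_key(dct, old, new):
    dct[new] = dct.pop(old)

_demo_sd = {"layer_1.1.conv_proj.weight": 0}
_demo_rename_key(_demo_sd, "layer_1.1.conv_proj.weight", "mobilevitv2.encoder.layer.0.conv_projection.weight")
assert "mobilevitv2.encoder.layer.0.conv_projection.weight" in _demo_sd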
| 51 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('''The nodes number should be same as the number of coins''')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
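# Minimal usage sketch (added for illustration): for the tree below the answer is two
# moves — one coin from the root to each leaf.
#     3
#    / \
#   0   0
if __name__ == "__main__":
    demo_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(demo_root) == 2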
| 506 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
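# Usage sketch (added for illustration, not part of the original file): the defaults above
# mirror the published Megatron-BERT 345M hyperparameters; any of them can be overridden.
if __name__ == "__main__":
    demo_config = MegatronBertConfig(num_hidden_layers=2)  # smaller stack for a quick smoke test
    print(demo_config.hidden_size, demo_config.num_attention_heads)  # 1024 16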
| 51 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 235 |
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth-first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return a string describing the shortest path from the source to target_vertex."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
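# Alternative sketch (added for illustration): the recursive `shortest_path` above can
# also be written iteratively by walking the parent map back to the source and reversing.
def shortest_path_iterative(g: Graph, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(reversed(path))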
| 51 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
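# Usage sketch (added for illustration; the test file path is hypothetical and must exist
# in a transformers checkout for this to run):
if __name__ == "__main__":
    mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(mapping))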
| 300 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without an explicit utf-8 encoding."""
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        """Find print statements that should use datasets' logging instead."""
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
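# Note (added for illustration): with the `_LazyModule` indirection above, importing this
# package is cheap; the heavy torch-backed submodule is only imported when an attribute
# such as `AutoformerModel` is first accessed.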
| 51 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load the original fairseq/metaseq state dict and rename/split its keys to match OPTModel."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated into K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 295 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler style)."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
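# Quick sanity check (added for illustration): 10! = 3628800 and 3+6+2+8+8+0+0 = 27.
assert solution(10) == 27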
| 51 | 0 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 644 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2', )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 51 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"{self.test_dir}/xla_spawn.py --num_cores 8 {self.test_file_path}".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 154 |
'''simple docstring'''
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
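# The functions above assume a singly linked list node with `val` and `next` attributes,
# which this file never defines. A minimal node class and smoke test (added for
# illustration):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = ListNode(values[0])
    node = head
    for v in values[1:]:
        node.next = ListNode(v)
        node = node.next
    return head


if __name__ == "__main__":
    assert is_palindrome(build_list([1, 2, 2, 1]))
    assert is_palindrome_stack(build_list([1, 2, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))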
| 51 | 0 |
'''simple docstring'''
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
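# Usage sketch (added for illustration; requires network access to fetch the checkpoint):
# the processor tokenizes text and preprocesses images in one call, as implemented above.
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # batch contains `input_ids`, `attention_mask` and `pixel_values`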
| 51 | 0 |
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count
def solve(needed_sum, power):
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10." )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
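# Worked example (added for illustration): 13 = 2**2 + 3**2 is the only way to write 13
# as a sum of unique natural-number squares, so `solve(13, 2)` should return 1.
if __name__ == "__main__":
    assert solve(13, 2) == 1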
| 62 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '''<pad>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''<s>''')
        self.assertEqual(vocab_keys[1], '''<pad>''')
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''facebook/xglm-564M''', padding=False, )
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace('''backbone''', '''vit''')
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "det_token" in name:
        name = name.replace('''det_token''', '''embeddings.detection_tokens''')
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''', '''encoder.mid_position_embeddings''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "class_embed" in name:
        name = name.replace('''class_embed''', '''class_labels_classifier''')
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''', '''bbox_predictor''')
    if "vit.norm" in name:
        name = name.replace('''vit.norm''', '''vit.layernorm''')

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }

        print('''Pushing to the hub...''')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='''hustvl''')
        model.push_to_hub(model_name, organization='''hustvl''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
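# Post-processing sketch (added for illustration, not part of the conversion): detection
# image processors in transformers expose `post_process_object_detection`, which turns the
# raw logits/boxes verified above into thresholded, rescaled detections. The threshold and
# target sizes below are arbitrary example values.
#
#   target_sizes = torch.tensor([image.size[::-1]])
#   results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
#   for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#       print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())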
| 51 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification(TaskTemplate ):
    """Task template mapping a dataset's text/label columns to the canonical text-classification schema."""

    task: str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        # Swap the generic ClassLabel in the schema for the dataset's own label feature.
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
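# Hedged usage sketch (mirrors `datasets.tasks.TextClassification`; assumes a Dataset with
# "text" and "labels" columns):
#   dataset = dataset.prepare_for_task(TextClassification(text_column="text", label_column="labels"))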
| 399 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a__ : List[Any] = logging.get_logger(__name__)
a__ : int = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
a__ : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a__ : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig ):
    """Configuration for a Whisper encoder-decoder speech model."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs

    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , sampling_rate: int = 22050 , time_duration: float = 5.0 , frequency: int = 220 , ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        # Log-mel `input_features` come from the feature-extractor side of the preprocessor...
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        # ...while `decoder_input_ids` (and optional past_key_values) come from the tokenizer side.
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs

    @property
    def atol_for_validation( self ) -> float:
        return 1e-3
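# Usage sketch (values are illustrative):
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   assert config.num_attention_heads == config.encoder_attention_heads  # via attribute_map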
| 51 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_owlvit': [
        'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'OwlViTConfig',
        'OwlViTOnnxConfig',
        'OwlViTTextConfig',
        'OwlViTVisionConfig',
    ],
    'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ['OwlViTFeatureExtractor']
    _import_structure["image_processing_owlvit"] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OwlViTModel',
        'OwlViTPreTrainedModel',
        'OwlViTTextModel',
        'OwlViTVisionModel',
        'OwlViTForObjectDetection',
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_lowerCAmelCase :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
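# `_LazyModule` defers the heavy imports: attribute access on the package resolves only the
# submodule that defines the requested name, so e.g. the config and processor can be
# imported without pulling in torch.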
| 506 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ )
UpperCAmelCase = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
UpperCAmelCase = global_attention_mask
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ )
UpperCAmelCase, UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1e-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """Build the standard LED test inputs, synthesizing any mask that was not provided."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
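# Used both by the model tester above and by the slow integration tests further down.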
@require_tf
class TFLEDModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = TFLEDModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ )
def __snake_case ( self : int ):
self.config_tester.run_common_tests()
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCAmelCase = 2
UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCAmelCase = True
UpperCAmelCase = self.model_tester.seq_length
UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Tuple ):
UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : int ):
UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
a__ : int = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase ):
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 )
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
| 51 | 0 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
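# Distilled Marian en-ro student checkpoint; substituted for facebook/mbart-large-cc25
# in the fine-tuning bash script below.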
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class SCREAMING_SNAKE_CASE (UpperCAmelCase_ ):
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
super().setUp()
lowercase__ = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=a__ , )
lowercase__ = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
MarianMTModel.from_pretrained(a__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowercase__ = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
lowercase__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(a__ , str(a__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase__ = f"""\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n """.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase__ = ['finetune.py'] + bash_script.split() + args
with patch.object(a__ , 'argv' , a__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(a__ )
lowercase__ = SummarizationModule.add_model_specific_args(a__ , os.getcwd() )
lowercase__ = parser.parse_args()
lowercase__ = main(a__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics['val'][0]
lowercase__ = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , a__ )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(a__ )
lowercase__ = [x for x in contents if x.endswith('.ckpt' )][0]
lowercase__ = os.path.join(args.output_dir , a__ )
lowercase__ = torch.load(a__ , map_location='cpu' )
lowercase__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(a__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE (UpperCAmelCase_ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
lowercase__ = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowercase__ = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
lowercase__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
lowercase__ = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(a__ , str(a__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = bash_script.replace('--fp16' , '' )
lowercase__ = 6
lowercase__ = (
['distillation.py']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'--gpus=1',
'--learning_rate=1e-3',
f"""--num_train_epochs={epochs}""",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(a__ , 'argv' , a__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(a__ )
lowercase__ = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
lowercase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase__ = distill_main(a__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics['val'][0]
lowercase__ = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # otherwise the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # otherwise generate is hanging (possibly a bad saved config)
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , a__ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(a__ )
lowercase__ = [x for x in contents if x.endswith('.ckpt' )][0]
lowercase__ = os.path.join(args.output_dir , a__ )
lowercase__ = torch.load(a__ , map_location='cpu' )
lowercase__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(a__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 235 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(R'^\s*try:')
# Catches a line with else:
_re_else = re.compile(R'^\s*else:')
def find_backend(line ):
    """Find one (or multiple, joined by `_and_`) backend name(s) gated by an init line."""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file ):
    """Parse one __init__.py: collect per-backend objects from `_import_structure` and TYPE_CHECKING."""
    with open(init_file , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
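# Both return values map backend name -> list of object names: one dict parsed from
# `_import_structure`, the other from the TYPE_CHECKING imports; `analyze_results` diffs them.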
def analyze_results(import_dict_objects , type_hint_objects ):
    """Report duplicates and mismatches between the two halves of an init."""
    def find_duplicates(obj ):
        return [k for k, v in collections.Counter(obj ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits():
    """Check every __init__.py for mismatches between `_import_structure` and TYPE_CHECKING."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules():
    """Collect every package directory and every top-level module under src/transformers."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def check_submodules():
    """Verify every submodule is registered in the main init's `_import_structure`."""
    # Load the transformers package from the repo checkout, not an installed copy.
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F'{list_of_modules}\n'
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 300 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __snake_case ( self : Dict ):
super().setUp()
UpperCAmelCase = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , )
UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __snake_case ( self : Optional[int] ):
MarianMTModel.from_pretrained(a__ )
@slow
@require_torch_gpu
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase = {os.path.basename(a__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
UpperCAmelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' )
UpperCAmelCase = 6
UpperCAmelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase = distill_main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # otherwise the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # otherwise generate is hanging (possibly a bad saved config)
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase = {os.path.basename(a__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 51 | 0 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 2000 grid whose entry at (row i, col j) is 1000 - i - j, so rows and columns strictly decrease."""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    """Check that every row and every column of the grid is sorted in decreasing order."""
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    """Binary-search a non-increasing array for the index of its first negative value."""
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
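# e.g. find_negative_index([4, 3, 2, -1]) == 3 and find_negative_index([-1, -2]) == 0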
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    """Count negatives by shrinking the per-row search bound with binary search."""
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    """Count negatives with a flat comprehension over every cell."""
    return len([number for row in grid for number in row if number < 0] )
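# All three counting strategies agree; e.g. each returns 8 on the first grid in `test_grids`.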
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    """Scan each row left to right and stop at the first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    """Time the three implementations on the large grid."""
    from timeit import timeit

    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""" , setup=setup , number=500 )
        print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 265 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin ):
    """Bundles a CLIP image processor and an XLM-R tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
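# Hedged usage sketch (class name follows the upstream AltCLIP processor; the checkpoint
# handle is illustrative):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")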
| 51 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_UpperCAmelCase : Any = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig ):
    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def A_ ( self : Dict ) -> str:
return self.encoder_attention_heads
@property
def A_ ( self : Any ) -> Optional[int]:
return self.d_model
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = version.parse("""1.11""" )
@property
def A_ ( self : Tuple ) -> int:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def A_ ( self : int ) -> Optional[int]:
return 1e-5
@property
def A_ ( self : Tuple ) -> Union[str, Any]:
return 12
| 295 |
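# --- Hedged usage sketch (added, not part of the original snippet) ---
# The config class above appears to mirror transformers.TableTransformerConfig;
# assuming that correspondence, the attribute_map aliases can be exercised so:
from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
print(config.hidden_size)          # 256, aliased to d_model
print(config.num_attention_heads)  # 8, aliased to encoder_attention_heads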
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 51 | 0 |
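# --- Minimal sketch of the optional-dependency guard above (illustrative) ---
# On success the real classes are imported; on failure, dummy stand-ins are.
# The probe below is an assumed re-implementation, not the library's own.
import importlib.util

def _torch_present() -> bool:
    return importlib.util.find_spec("torch") is not None

if _torch_present():
    import torch
else:
    torch = None  # callers must check for None before use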
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : int = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644 |
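# --- Minimal sketch of the _LazyModule idea (names here are illustrative) ---
# Attribute access triggers the real import, so importing the package stays
# cheap until a symbol such as AlbertModel is actually touched.
import importlib
import types

class _Lazy(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                mod = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(mod, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")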
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
pass
def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> str:
"""simple docstring"""
    UpperCAmelCase = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> Dict:
"""simple docstring"""
UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = npimg.shape
return {"hash": hashimage(SCREAMING_SNAKE_CASE_ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCamelCase =dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Dict , a__ : int ):
UpperCAmelCase = MaskGenerationPipeline(model=a__ , image_processor=a__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __snake_case ( self : int , a__ : Dict , a__ : Tuple ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : str ):
pass
@slow
@require_torch
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
UpperCAmelCase = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : Dict ):
UpperCAmelCase = '''facebook/sam-vit-huge'''
UpperCAmelCase = pipeline('''mask-generation''' , model=a__ )
UpperCAmelCase = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51 | 0 |
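# --- Hedged sketch of the hashing helpers in the test above ---
# Masks are reduced to a 10-character md5 digest plus the array shape so
# large outputs can be compared cheaply inside assertions.
import hashlib
import numpy as np
from PIL import Image

mask = Image.fromarray(np.zeros((4, 4), dtype=np.uint8))
digest = hashlib.md5(mask.tobytes()).hexdigest()[:10]
summary = {"hash": digest, "shape": np.array(mask).shape}
print(summary)  # {'hash': '...', 'shape': (4, 4)}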
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 154 |
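# --- Minimal sketch of the TYPE_CHECKING split used above (illustrative) ---
# Heavy imports run only under static analysis; at runtime they are deferred
# until the symbol is genuinely needed.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import numpy  # resolved by type checkers only, never at runtime here

def column_means(rows: "numpy.ndarray"):
    import numpy  # real import deferred to call time
    return numpy.asarray(rows).mean(axis=0)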
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["pixel_values"]
def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ):
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ):
        UpperCAmelCase = image.astype(np.float32 )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
| 51 | 0 |
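# --- Hedged re-statement of the make_batched normalization above ---
# A single frame, one clip, or a batch of clips all end up as a list of
# frame lists; the image-validity checks from the snippet are omitted here.
import numpy as np

def _to_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos          # already a batch of clips
    if isinstance(videos, (list, tuple)):
        return [videos]        # one clip of frames
    return [[videos]]          # a single frame

frame = np.zeros((8, 8, 3), dtype=np.uint8)
assert len(_to_batched(frame)) == 1
assert len(_to_batched([frame, frame])[0]) == 2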
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowercase ( UpperCAmelCase_ , unittest.TestCase ):
_UpperCAmelCase = DDIMPipeline
_UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def UpperCamelCase ( self ) -> int:
torch.manual_seed(0 )
snake_case = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
snake_case = DDIMScheduler()
snake_case = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase ( self , A__ , A__=0 ) -> List[str]:
if str(a__ ).startswith('''mps''' ):
snake_case = torch.manual_seed(a__ )
else:
snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
snake_case = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self ) -> Dict:
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case = self.get_dummy_inputs(a__ )
snake_case = pipe(**a__ ).images
snake_case = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
snake_case = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def UpperCamelCase ( self ) -> int:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCamelCase ( self ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def UpperCamelCase ( self ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCamelCase ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self ) -> Dict:
snake_case = '''google/ddpm-cifar10-32'''
snake_case = UNetaDModel.from_pretrained(a__ )
snake_case = DDIMScheduler()
snake_case = DDIMPipeline(unet=a__ , scheduler=a__ )
ddim.to(a__ )
ddim.set_progress_bar_config(disable=a__ )
snake_case = torch.manual_seed(0 )
snake_case = ddim(generator=a__ , eta=0.0 , output_type='''numpy''' ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = '''google/ddpm-ema-bedroom-256'''
snake_case = UNetaDModel.from_pretrained(a__ )
snake_case = DDIMScheduler.from_pretrained(a__ )
snake_case = DDIMPipeline(unet=a__ , scheduler=a__ )
ddpm.to(a__ )
ddpm.set_progress_bar_config(disable=a__ )
snake_case = torch.manual_seed(0 )
snake_case = ddpm(generator=a__ , output_type='''numpy''' ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 342 |
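# --- Hedged usage sketch mirroring the slow test above ---
# Real diffusers names are used here; the snippet's UNetaDModel appears to
# correspond to UNet2DModel.
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)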
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , a__ : List[str]="sayef/fsner-bert-base-uncased" ):
super(a__ , self ).__init__()
UpperCAmelCase = AutoModel.from_pretrained(a__ , return_dict=a__ )
UpperCAmelCase = torch.nn.CosineSimilarity(3 , 1e-0_8 )
UpperCAmelCase = torch.nn.Softmax(dim=1 )
def __snake_case ( self : List[Any] , **a__ : Tuple ):
return self.bert(**a__ ).last_hidden_state
def __snake_case ( self : int , a__ : List[str] ):
return token_embeddings.sum(2 , keepdim=a__ )
def __snake_case ( self : str , a__ : str , a__ : str , a__ : int=1 ):
return self.softmax(T * self.cos(a__ , a__ ) )
def __snake_case ( self : Tuple , a__ : Tuple , a__ : str ):
UpperCAmelCase = W_supports['''sizes'''].tolist()
UpperCAmelCase = W_supports['''start_token_id'''].item()
UpperCAmelCase = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCAmelCase = self.BERT(**a__ )
UpperCAmelCase = self.BERT(**a__ )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = W_supports['''input_ids'''] == start_token_id
UpperCAmelCase = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(a__ ):
if i == 0:
UpperCAmelCase = 0
else:
UpperCAmelCase = support_sizes[i - 1]
UpperCAmelCase = S[s : s + size][start_token_masks[s : s + size]]
UpperCAmelCase = S[s : s + size][end_token_masks[s : s + size]]
UpperCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCAmelCase = torch.vstack((p_starts, p_start) )
UpperCAmelCase = torch.vstack((p_ends, p_end) )
else:
UpperCAmelCase = p_start
UpperCAmelCase = p_end
return p_starts, p_ends
| 51 | 0 |
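# --- Minimal sketch of the temperature-scaled similarity scoring above ---
# softmax(T * cosine(query, support)); dim=-1 is used here for a 3-D toy
# input, whereas the module above uses dim=3 on 4-D tensors.
import torch

cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-08)
query = torch.randn(2, 7, 16)    # (batch, tokens, hidden), illustrative shapes
support = torch.randn(2, 7, 16)
T = 1.0
scores = torch.softmax(T * cos(query, support), dim=1)
print(scores.shape)  # torch.Size([2, 7])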
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Any=[10, 20, 30, 40] , UpperCAmelCase_ : Any=[2, 2, 3, 2] , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : List[Any]=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Any=[2, 3, 4] , UpperCAmelCase_ : int=None , ):
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : str = num_stages
SCREAMING_SNAKE_CASE : Any = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = out_features
SCREAMING_SNAKE_CASE : Optional[Any] = out_indices
SCREAMING_SNAKE_CASE : Tuple = scope
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def _A ( self : Dict ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _A ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE : Dict = ConvNextVaModel(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _A ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE : str = ConvNextVaForImageClassification(a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : List[Any] = ConvNextVaBackbone(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : int = ConvNextVaBackbone(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"pixel_values": pixel_values}
return config, inputs_dict
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Any = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : List[Any] = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Any = False
UpperCamelCase_ : List[Any] = False
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = ConvNextVaModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _A ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A ( self : Optional[Any] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _A ( self : List[str] ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _A ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _A ( self : str ):
pass
def _A ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_with_labels()
SCREAMING_SNAKE_CASE : Optional[Any] = True
if model_class.__name__ in [
*get_values(a__ ),
*get_values(a__ ),
]:
continue
SCREAMING_SNAKE_CASE : List[Any] = model_class(a__ )
model.to(a__ )
model.train()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(a__ , a__ , return_labels=a__ )
SCREAMING_SNAKE_CASE : List[str] = model(**a__ ).loss
loss.backward()
def _A ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_with_labels()
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if (
model_class.__name__
in [*get_values(a__ ), *get_values(a__ )]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE : Optional[int] = model_class(a__ )
model.to(a__ )
model.gradient_checkpointing_enable()
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(a__ , a__ , return_labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a__ ).loss
loss.backward()
def _A ( self : str ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(a__ )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _A ( self : Any ):
def check_hidden_states_output(UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(a__ , a__ , a__ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def _A ( self : Tuple ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = ConvNextVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : Dict ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : int = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(a__ )
SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[Any] = preprocessor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**a__ )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
| 62 |
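# --- Minimal sketch of the forward-signature check in the suite above ---
# signature.parameters is ordered, so positional argument names can be
# asserted deterministically.
import inspect
import torch

class Toy(torch.nn.Module):
    def forward(self, pixel_values, labels=None):
        return pixel_values

arg_names = [*inspect.signature(Toy.forward).parameters.keys()]
assert arg_names[:2] == ["self", "pixel_values"]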
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =(EulerDiscreteScheduler,)
_lowerCamelCase =10
def __snake_case ( self : str , **a__ : Tuple ):
UpperCAmelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**a__ )
return config
def __snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def __snake_case ( self : Optional[int] ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a__ , beta_end=a__ )
def __snake_case ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a__ )
def __snake_case ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __snake_case ( self : str ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __snake_case ( self : str ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ , use_karras_sigmas=a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 51 | 0 |
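# --- Distilled sketch of the sampling loop exercised above ---
# A zero tensor stands in for the UNet so the control flow runs standalone.
import torch
from diffusers import EulerDiscreteScheduler

sched = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
sched.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
gen = torch.manual_seed(0)
for t in sched.timesteps:
    model_in = sched.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_in)  # stand-in for model(model_in, t)
    sample = sched.step(noise_pred, t, sample, generator=gen).prev_sample
print(sample.shape)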
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 101 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Any:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =field(
metadata={"help": "The csv file to plot."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
_lowerCamelCase =list_field(
default=UpperCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
try:
int(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str:
"""simple docstring"""
try:
float(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a__ : Optional[int] ):
UpperCAmelCase = args
UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
UpperCAmelCase = csv.DictReader(a__ )
for row in reader:
UpperCAmelCase = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
UpperCAmelCase = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
UpperCAmelCase = float(row['''result'''] )
def __snake_case ( self : Dict ):
UpperCAmelCase, UpperCAmelCase = plt.subplots()
UpperCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
UpperCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
UpperCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
UpperCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
UpperCAmelCase = self.result_dict[model_name]['''result''']
((UpperCAmelCase), (UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a__ , )
else:
UpperCAmelCase = np.asarray(
                    [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((UpperCAmelCase), (UpperCAmelCase)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
UpperCAmelCase = np.asarray(a__ , a__ )[: len(a__ )]
plt.scatter(
a__ , a__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
plt.plot(a__ , a__ , '''--''' )
title_str += f" {label_model_name} vs."
UpperCAmelCase = title_str[:-4]
UpperCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(a__ )
plt.xlabel(a__ )
plt.ylabel(a__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ )
plot.plot()
if __name__ == "__main__":
main()
| 51 | 0 |
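# --- Hedged sketch: a csv in the shape the Plot class above consumes ---
# Column names follow the DictReader loop; the script filename in the
# trailing comment is illustrative.
import csv

with open("results.csv", "w", newline="") as f:
    writer = csv.DictWriter(
        f, fieldnames=["model", "batch_size", "sequence_length", "result"]
    )
    writer.writeheader()
    writer.writerow(
        {"model": "bert-base", "batch_size": 8, "sequence_length": 128, "result": 0.42}
    )
# python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png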
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase__ ( __lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
if not is_accelerate_available():
return method
__UpperCamelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(SCREAMING_SNAKE_CASE_ ) < version.parse('0.17.0' ):
return method
def wrapper(self : Tuple , *__lowercase : List[Any] , **__lowercase : Any ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return wrapper
| 399 |
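# --- Minimal sketch of the base-version comparison above ---
from packaging import version

def predates_hooks(accelerate_version: str) -> bool:
    base = version.parse(accelerate_version).base_version
    return version.parse(base) < version.parse("0.17.0")

assert predates_hooks("0.16.1")
assert not predates_hooks("0.17.0.dev0")  # dev releases count as 0.17.0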
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
print('''Loading config file...''' )
def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ):
UpperCAmelCase = []
for k, v in d.items():
UpperCAmelCase = parent_key + sep + k if parent_key else k
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() )
else:
items.append((new_key, v) )
return dict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = argparse.Namespace()
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file:
try:
UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader )
UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ )
for k, v in flat_cfg.items():
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) )
return config
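# --- Self-contained re-statement of the yaml-flattening step above ---
# Nested keys are joined with "." so they can be set as flat attributes on
# an argparse.Namespace.
import collections.abc

def flatten(d, parent_key="", sep="."):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)

assert flatten({"model": {"classification": {"name": "mobilevit_v2"}}}) == {
    "model.classification.name": "mobilevit_v2"
}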
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase = MobileViTVaConfig()
UpperCAmelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase = 151
UpperCAmelCase = 512
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase = 21
UpperCAmelCase = 512
UpperCAmelCase = '''pascal-voc-id2label.json'''
UpperCAmelCase = True
# orig_config
UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ )
assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = val
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
"""simple docstring"""
if base_model:
UpperCAmelCase = ''''''
else:
UpperCAmelCase = '''mobilevitv2.'''
UpperCAmelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase = k[8:]
else:
UpperCAmelCase = k
if ".block." in k:
UpperCAmelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase = [0, 1]
elif i == 4:
UpperCAmelCase = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(SCREAMING_SNAKE_CASE_ )
for k in keys_to_ignore:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
else:
UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
    # remove and rename some keys to load the original model
UpperCAmelCase = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase = outputs.logits
UpperCAmelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
        'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 51 | 0 |
"""simple docstring"""
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = val
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Optional[Any] = None
def __lowerCAmelCase ( self , A ) -> Any:
if self.val:
if val < self.val:
if self.left is None:
_UpperCAmelCase : Dict = Node(a__ )
else:
self.left.insert(a__ )
elif val > self.val:
if self.right is None:
_UpperCAmelCase : Optional[Any] = Node(a__ )
else:
self.right.insert(a__ )
else:
_UpperCAmelCase : str = val
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
if root:
inorder(root.left , SCREAMING_SNAKE_CASE_ )
res.append(root.val )
inorder(root.right , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] ):
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return arr
_UpperCAmelCase : Optional[Any] = Node(arr[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
_UpperCAmelCase : Optional[int] = []
inorder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
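
if __name__ == "__main__":
    # Illustrative check (added): tree sort agrees with Python's built-in
    # sort. Average cost is O(n log n); already-sorted input degrades the
    # unbalanced BST into a chain, giving O(n^2).
    sample = [10, 1, 3, 2, 9, 14, 13]
    assert tree_sort(sample) == sorted(sample)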
| 506 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="megatron-bert"
def __init__( self : Dict , a__ : Union[str, Any]=29056 , a__ : Dict=1024 , a__ : str=24 , a__ : Any=16 , a__ : Tuple=4096 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : Tuple=0.1 , a__ : Any=512 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[Any]="absolute" , a__ : Dict=True , **a__ : Dict , ):
super().__init__(pad_token_id=a__ , **a__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
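
if __name__ == "__main__":
    # Illustrative usage (added; assumes `transformers` is installed, where
    # this configuration ships as MegatronBertConfig): any constructor
    # keyword overrides the corresponding default field.
    from transformers import MegatronBertConfig

    config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8)
    assert config.hidden_size == 512 and config.num_hidden_layers == 8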
| 51 | 0 |
def permute(nums) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums) -> list[list[int]]:
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
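
if __name__ == "__main__":
    # Illustrative check (added): both implementations enumerate the same
    # n! orderings, just in different orders, so compare them as sets.
    first = {tuple(p) for p in permute([1, 2, 3])}
    second = {tuple(p) for p in permute2([1, 2, 3])}
    assert first == second and len(first) == 6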
| 235 |
'''simple docstring'''
from __future__ import annotations

graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


class Graph:
    '''simple docstring'''

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    try:
        print(g.shortest_path('Foo'))
    except ValueError as error:
        # 'Foo' is not in the graph, so it has no breadth-first parent
        print(error)
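
if __name__ == "__main__":
    # Illustrative check (added): the parent pointers form a breadth-first
    # tree, so each reconstructed path is a shortest path in this
    # unweighted graph.
    h = Graph(graph, 'A')
    h.breadth_first_search()
    assert h.shortest_path('D') == 'A->B->D'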
| 51 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] ) -> Optional[int]:
print("Loading config file..." )
def flatten_yaml_as_dict(lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[int]="" , lowerCAmelCase: Dict="." ):
_UpperCAmelCase : Union[str, Any] = []
for k, v in d.items():
_UpperCAmelCase : List[str] = parent_key + sep + k if parent_key else k
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() )
else:
items.append((new_key, v) )
return dict(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : int = argparse.Namespace()
with open(SCREAMING_SNAKE_CASE_ , "r" ) as yaml_file:
try:
_UpperCAmelCase : str = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader )
_UpperCAmelCase : Tuple = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ )
for k, v in flat_cfg.items():
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) )
return config
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: Optional[int] ) -> int:
_UpperCAmelCase : Dict = MobileViTVaConfig()
_UpperCAmelCase : Dict = False
# dataset
if task_name.startswith("imagenet1k_" ):
_UpperCAmelCase : Union[str, Any] = 1000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCAmelCase : List[str] = 384
else:
_UpperCAmelCase : Any = 256
_UpperCAmelCase : int = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
_UpperCAmelCase : Any = 2_1000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCAmelCase : Optional[Any] = 384
else:
_UpperCAmelCase : int = 256
_UpperCAmelCase : Tuple = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
_UpperCAmelCase : int = 151
_UpperCAmelCase : Dict = 512
_UpperCAmelCase : Optional[int] = "ade20k-id2label.json"
_UpperCAmelCase : Optional[int] = True
elif task_name.startswith("voc_" ):
_UpperCAmelCase : Optional[int] = 21
_UpperCAmelCase : Optional[Any] = 512
_UpperCAmelCase : List[str] = "pascal-voc-id2label.json"
_UpperCAmelCase : int = True
# orig_config
_UpperCAmelCase : List[str] = load_orig_config_file(SCREAMING_SNAKE_CASE_ )
assert getattr(SCREAMING_SNAKE_CASE_ , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
_UpperCAmelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE_ , "model.classification.mitv2.width_multiplier" , 1.0 )
assert (
getattr(SCREAMING_SNAKE_CASE_ , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_UpperCAmelCase : int = getattr(SCREAMING_SNAKE_CASE_ , "model.classification.activation.name" , "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_UpperCAmelCase : Dict = getattr(SCREAMING_SNAKE_CASE_ , "model.segmentation.output_stride" , 16 )
if "_deeplabv3" in task_name:
_UpperCAmelCase : List[str] = getattr(SCREAMING_SNAKE_CASE_ , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
_UpperCAmelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_ , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
_UpperCAmelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_ , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
# id2label
_UpperCAmelCase : int = "huggingface/label-files"
_UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    _UpperCAmelCase : Any = {int(k): v for k, v in idalabel.items()}
_UpperCAmelCase : str = idalabel
_UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any , lowerCAmelCase: int , lowerCAmelCase: List[str] ) -> str:
_UpperCAmelCase : Union[str, Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : Dict = val
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: str=False ) -> int:
if base_model:
_UpperCAmelCase : Union[str, Any] = ""
else:
_UpperCAmelCase : Optional[int] = "mobilevitv2."
_UpperCAmelCase : Any = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_UpperCAmelCase : Union[str, Any] = k[8:]
else:
_UpperCAmelCase : Optional[Any] = k
if ".block." in k:
_UpperCAmelCase : List[Any] = k_new.replace(".block." , "." )
if ".conv." in k:
_UpperCAmelCase : Dict = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
_UpperCAmelCase : Dict = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
_UpperCAmelCase : Optional[Any] = k_new.replace("conv_1." , F'{model_prefix}conv_stem.' )
for i in [1, 2]:
if F'layer_{i}.' in k:
_UpperCAmelCase : int = k_new.replace(F'layer_{i}.' , F'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
_UpperCAmelCase : int = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
_UpperCAmelCase : Tuple = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if F'layer_{i}.0.' in k:
_UpperCAmelCase : Union[str, Any] = k_new.replace(F'layer_{i}.0.' , F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if F'layer_{i}.1.local_rep.0.' in k:
_UpperCAmelCase : int = k_new.replace(F'layer_{i}.1.local_rep.0.' , F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if F'layer_{i}.1.local_rep.1.' in k:
_UpperCAmelCase : Any = k_new.replace(F'layer_{i}.1.local_rep.1.' , F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
_UpperCAmelCase : Union[str, Any] = [0, 1]
elif i == 4:
_UpperCAmelCase : Any = [0, 1, 2, 3]
elif i == 5:
_UpperCAmelCase : List[Any] = [0, 1, 2]
for j in j_in:
if F'layer_{i}.1.global_rep.{j}.' in k:
_UpperCAmelCase : Dict = k_new.replace(
F'layer_{i}.1.global_rep.{j}.' , F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if F'layer_{i}.1.global_rep.{j+1}.' in k:
_UpperCAmelCase : Any = k_new.replace(
F'layer_{i}.1.global_rep.{j+1}.' , F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if F'layer_{i}.1.conv_proj.' in k:
_UpperCAmelCase : Dict = k_new.replace(F'layer_{i}.1.conv_proj.' , F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
_UpperCAmelCase : Union[str, Any] = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
_UpperCAmelCase : Optional[Any] = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
_UpperCAmelCase : Union[str, Any] = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
_UpperCAmelCase : Optional[int] = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
_UpperCAmelCase : Tuple = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
_UpperCAmelCase : Any = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
_UpperCAmelCase : List[str] = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
_UpperCAmelCase : List[str] = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
_UpperCAmelCase : List[Any] = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> int:
_UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(SCREAMING_SNAKE_CASE_ )
for k in keys_to_ignore:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_UpperCAmelCase : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: List[str] , lowerCAmelCase: str , lowerCAmelCase: Optional[Any] ) -> Tuple:
_UpperCAmelCase : str = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load original state_dict
_UpperCAmelCase : List[str] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
_UpperCAmelCase : Any = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval()
_UpperCAmelCase : Optional[int] = False
else:
_UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
_UpperCAmelCase : Dict = False
    # remove and rename some keys to load the original model
_UpperCAmelCase : List[str] = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : Optional[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_UpperCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_UpperCAmelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
# verify classification model
if task_name.startswith("imagenet" ):
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase : Optional[int] = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_UpperCAmelCase : str = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
        'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
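# Illustrative invocation (added; the file names are hypothetical
# placeholders, the flags match the argparse definition above):
#
#   python convert_mobilevitv2_original_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k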
| 300 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def __snake_case ( __A : int = 1000000 , __A : int = 10 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = defaultdict(SCREAMING_SNAKE_CASE_ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
SCREAMING_SNAKE_CASE : List[Any] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
SCREAMING_SNAKE_CASE : Tuple = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE_ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f'''{solution() = }''')
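
if __name__ == "__main__":
    # Illustrative cross-check (added): brute force over (outer, hole)
    # pairs. A square lamina of outer width o and hole width h (same
    # parity, h <= o - 2) uses o*o - h*h tiles; count the tile totals
    # achievable in 1..10 ways, as solution() does.
    limit = 1000
    ways: defaultdict = defaultdict(int)
    for outer in range(3, limit):
        if 4 * outer - 4 > limit:  # even the thinnest lamina is too big
            break
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > limit:
                break
            ways[tiles] += 1
    assert sum(1 for n in ways.values() if 1 <= n <= 10) == solution(limit)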
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
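
if __name__ == "__main__":
    # Illustrative check (added): rotating [[1, 2], [3, 4]] by 90 degrees
    # counterclockwise moves the right column (2, 4) to the top row.
    assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
    assert rotate_180([[1, 2], [3, 4]]) == [[4, 3], [2, 1]]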
| 295 |
'''simple docstring'''
from math import factorial


def solution(num: int = 100) -> int:
    """simple docstring"""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
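
if __name__ == "__main__":
    # Illustrative check (added): 10! = 3628800 and 3+6+2+8+8+0+0 = 27.
    assert solution(10) == 27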
| 51 | 0 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--src_path""" , type=SCREAMING_SNAKE_CASE_ , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
parser.add_argument(
"""--evaluation_set""" , type=SCREAMING_SNAKE_CASE_ , help="""where to store parsed evaluation_set file""" , )
parser.add_argument(
"""--gold_data_path""" , type=SCREAMING_SNAKE_CASE_ , help="""where to store parsed gold_data_path file""" , )
lowerCAmelCase__ = parser.parse_args()
with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
args.gold_data_path , """w""" ) as gold_file:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
for dpr_record in tqdm(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = dpr_record["""question"""]
lowerCAmelCase__ = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
eval_file.write(question + """\n""" )
gold_file.write("""\t""".join(SCREAMING_SNAKE_CASE_ ) + """\n""" )
if __name__ == "__main__":
main()
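
if __name__ == "__main__":
    # Illustrative record shape (added), inferred from the fields the loop
    # reads above; the question text and context title are made up.
    sample_record = {
        "question": "who wrote the declaration of independence",
        "positive_ctxs": [{"title": "Thomas Jefferson"}],
    }
    assert [ctx["title"] for ctx in sample_record["positive_ctxs"]] == ["Thomas Jefferson"]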
| 644 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
UpperCAmelCase = 32
UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 51 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class A__ ( UpperCAmelCase_ ):
lowerCamelCase__ : Optional[Any] =field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCamelCase__ : str =Features({"image": Image()} )
lowerCamelCase__ : Union[str, Any] =Features({"labels": ClassLabel} )
lowerCamelCase__ : str ="image"
lowerCamelCase__ : int ="labels"
def lowercase ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , a__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
__magic_name__ : str = copy.deepcopy(self )
__magic_name__ : Dict = self.label_schema.copy()
__magic_name__ : Union[str, Any] = features[self.label_column]
__magic_name__ : Optional[Any] = label_schema
return task_template
@property
def lowercase ( self ) -> str:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
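
if __name__ == "__main__":
    # Illustrative usage (added; assumes a `datasets` release that still
    # ships this template as datasets.tasks.ImageClassification): aligning
    # with a Features object copies its ClassLabel into the label schema.
    from datasets import ClassLabel, Features, Image
    from datasets.tasks import ImageClassification

    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    task = ImageClassification().align_with_features(features)
    assert task.label_schema["labels"].names == ["cat", "dog"]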
| 154 |
'''simple docstring'''


def is_palindrome(head) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
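
if __name__ == "__main__":
    # Minimal node type and demo (added): the three checks above only
    # assume a singly linked node exposing `.val` and `.next`.
    class ListNode:
        def __init__(self, val):
            self.val = val
            self.next = None

    def build(values):
        head = None
        for v in reversed(values):
            node = ListNode(v)
            node.next = head
            head = node
        return head

    assert is_palindrome(build([1, 2, 2, 1]))
    assert is_palindrome_stack(build([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build([1, 2, 3]))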
| 51 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 342 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ):
if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be None.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , )
return self.image_processor
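
if __name__ == "__main__":
    # Illustrative usage (added; assumes `transformers` is installed, where
    # this class ships as CLIPProcessor, and downloads the checkpoint):
    # one call tokenizes the text and preprocesses the images into a batch.
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    batch = processor(
        text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt"
    )
    assert {"input_ids", "attention_mask", "pixel_values"} <= set(batch.keys())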
| 51 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = 4_2
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[Any] , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCAmelCase_ : Tuple[int] = (64,) , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "silu" , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : float = 0.18_215 , UpperCAmelCase_ : str = "group" , ):
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE : int = Encoder(
in_channels=a__ , out_channels=a__ , down_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , act_fn=a__ , norm_num_groups=a__ , double_z=a__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
SCREAMING_SNAKE_CASE : List[str] = nn.Convad(a__ , a__ , 1 )
SCREAMING_SNAKE_CASE : List[str] = VectorQuantizer(a__ , a__ , beta=0.25 , remap=a__ , sane_index_shape=a__ )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Convad(a__ , a__ , 1 )
# pass init params to Decoder
SCREAMING_SNAKE_CASE : List[Any] = Decoder(
in_channels=a__ , out_channels=a__ , up_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , act_fn=a__ , norm_num_groups=a__ , norm_type=a__ , )
@apply_forward_hook
def _A ( self : Tuple , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = True ):
SCREAMING_SNAKE_CASE : int = self.encoder(a__ )
SCREAMING_SNAKE_CASE : Dict = self.quant_conv(a__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=a__ )
@apply_forward_hook
def _A ( self : str , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.quantize(a__ )
else:
SCREAMING_SNAKE_CASE : int = h
SCREAMING_SNAKE_CASE : List[str] = self.post_quant_conv(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.decoder(a__ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
def _A ( self : List[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = True ):
SCREAMING_SNAKE_CASE : List[Any] = sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.encode(a__ ).latents
SCREAMING_SNAKE_CASE : Optional[int] = self.decode(a__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
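
if __name__ == "__main__":
    # Illustrative round trip (added; assumes `diffusers` is installed,
    # where this class ships as VQModel): encode to latents, quantize, and
    # decode back to an image-shaped tensor.
    import torch
    from diffusers import VQModel

    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = model.encode(sample).latents
        reconstruction = model.decode(latents).sample
    assert reconstruction.shape == sample.shape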
| 62 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
UpperCAmelCase = pickle.dumps(a__ )
pickle.loads(a__ )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : int ):
UpperCAmelCase = '''Hello World!'''
UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
| 51 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __lowercase (UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : int = seq_length
SCREAMING_SNAKE_CASE_ : int = is_training
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : str = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : str = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE_ : Any = num_choices
SCREAMING_SNAKE_CASE_ : Optional[Any] = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = DistilBertModel(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(a__ , a__ )
SCREAMING_SNAKE_CASE_ : Any = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DistilBertForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DistilBertForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
a__ , attention_mask=a__ , start_positions=a__ , end_positions=a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = DistilBertForSequenceClassification(a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = DistilBertForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_choices
SCREAMING_SNAKE_CASE_ : Any = DistilBertForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
a__ , attention_mask=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : int = config_and_inputs
SCREAMING_SNAKE_CASE_ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase (UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCAmelCase = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ConfigTester(self , config_class=a__ , dim=3_7 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Any = DistilBertModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(config=a__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(a__ , a__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.jit.trace(
a__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a__ , os.path.join(a__ , 'traced_model.pt' ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.jit.load(os.path.join(a__ , 'traced_model.pt' ) , map_location=a__ )
loaded(inputs_dict['input_ids'].to(a__ ) , inputs_dict['attention_mask'].to(a__ ) )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(a__ , attention_mask=a__ )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a__ )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1E-4 ) )
| 101 |
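A minimal, self-contained sketch of the core shape check the tester above performs, assuming the same toy sizes its constructor defaults to (batch 13, sequence length 7, hidden size 32); this is an illustration, not the test harness itself:
import torch
from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig(vocab_size=99, dim=32, n_layers=5, n_heads=4, hidden_dim=37)
model = DistilBertModel(config)
model.eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)
# last_hidden_state comes back as (batch_size, seq_length, hidden_size)
assert outputs.last_hidden_state.shape == (13, 7, 32)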
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig:
"""simple docstring"""
UpperCAmelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase = 192
UpperCAmelCase = 768
UpperCAmelCase = 12
UpperCAmelCase = 3
UpperCAmelCase = [800, 1_333]
UpperCAmelCase = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = 330
UpperCAmelCase = 14
UpperCAmelCase = 6
UpperCAmelCase = 1_320
elif "yolos_s" in yolos_name:
UpperCAmelCase = 384
UpperCAmelCase = 1_536
UpperCAmelCase = 12
UpperCAmelCase = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase = [800, 1_344]
UpperCAmelCase = 91
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''coco-detection-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
if "backbone" in name:
UpperCAmelCase = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = int(key_split[2] )
UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[
dim : dim * 2, :
]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
else:
UpperCAmelCase = val
return orig_state_dict
def __snake_case ( ) -> torch.Tensor:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# load 🤗 model
UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512
UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes
UpperCAmelCase, UpperCAmelCase = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCAmelCase = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCAmelCase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCAmelCase = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCAmelCase = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCAmelCase = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCAmelCase = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
UpperCAmelCase = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
UpperCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__ : Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 | 0 |
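The conversion script above splits each timm-style fused qkv projection into separate query, key and value weights by row slices; a standalone sketch of that slicing with an illustrative hidden size of 192 (the yolos_ti value):
import torch

hidden_size = 192
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused rows: q, then k, then v
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b = in_proj_bias[:hidden_size]
key_b = in_proj_bias[hidden_size : hidden_size * 2]
value_b = in_proj_bias[-hidden_size:]
assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)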
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : int ):
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = SamImageProcessor()
__UpperCamelCase = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : str , **__A : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def _lowerCamelCase ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__UpperCamelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
__UpperCamelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = SamProcessor(image_processor=a__ )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = image_processor(a__ , return_tensors='np' )
__UpperCamelCase = processor(images=a__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = SamProcessor(image_processor=a__ )
__UpperCamelCase = [torch.ones((1, 3, 5, 5) )]
__UpperCamelCase = [[1_7_6_4, 2_6_4_6]]
__UpperCamelCase = [[6_8_3, 1_0_2_4]]
__UpperCamelCase = processor.post_process_masks(a__ , a__ , a__ )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__UpperCamelCase = processor.post_process_masks(
a__ , torch.tensor(a__ ) , torch.tensor(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
__UpperCamelCase = [np.ones((1, 3, 5, 5) )]
__UpperCamelCase = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__UpperCamelCase = [[1, 0], [0, 1]]
with self.assertRaises(a__ ):
__UpperCamelCase = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
@require_vision
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = SamImageProcessor()
__UpperCamelCase = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : Optional[int] , **__A : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def _lowerCamelCase ( self : int ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : int ):
__UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__UpperCamelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self : str ):
__UpperCamelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
__UpperCamelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = SamProcessor(image_processor=a__ )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = image_processor(a__ , return_tensors='np' )
__UpperCamelCase = processor(images=a__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = SamProcessor(image_processor=a__ )
__UpperCamelCase = [tf.ones((1, 3, 5, 5) )]
__UpperCamelCase = [[1_7_6_4, 2_6_4_6]]
__UpperCamelCase = [[6_8_3, 1_0_2_4]]
__UpperCamelCase = processor.post_process_masks(a__ , a__ , a__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__UpperCamelCase = processor.post_process_masks(
a__ , tf.convert_to_tensor(a__ ) , tf.convert_to_tensor(a__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
__UpperCamelCase = [np.ones((1, 3, 5, 5) )]
__UpperCamelCase = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
__UpperCamelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__UpperCamelCase = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = SamImageProcessor()
__UpperCamelCase = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : Union[str, Any] , **__A : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def _lowerCamelCase ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__UpperCamelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = SamProcessor(image_processor=a__ )
__UpperCamelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__UpperCamelCase = [tf.convert_to_tensor(a__ )]
__UpperCamelCase = [torch.tensor(a__ )]
__UpperCamelCase = [[1_7_6_4, 2_6_4_6]]
__UpperCamelCase = [[6_8_3, 1_0_2_4]]
__UpperCamelCase = processor.post_process_masks(
a__ , a__ , a__ , return_tensors='tf' )
__UpperCamelCase = processor.post_process_masks(
a__ , a__ , a__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = SamProcessor(image_processor=a__ )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = image_processor(a__ , return_tensors='pt' )['pixel_values'].numpy()
__UpperCamelCase = processor(images=a__ , return_tensors='pt' )['pixel_values'].numpy()
__UpperCamelCase = image_processor(a__ , return_tensors='tf' )['pixel_values'].numpy()
__UpperCamelCase = processor(images=a__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
| 399 |
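All three test classes above exercise SamProcessor.post_process_masks; a minimal PyTorch-only sketch of the call they make, with the sizes copied from the tests:
import torch
from transformers import SamImageProcessor, SamProcessor

processor = SamProcessor(image_processor=SamImageProcessor())
low_res_masks = [torch.ones((1, 3, 5, 5))]  # dummy model output, as in the tests
original_sizes = [[1764, 2646]]             # (height, width) of the source image
reshaped_input_sizes = [[683, 1024]]        # (height, width) after preprocessing
masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
# masks are upscaled back to the original image resolution
assert masks[0].shape == (1, 3, 1764, 2646)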
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a__ : List[Any] = logging.get_logger(__name__)
a__ : int = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
a__ : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a__ : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="whisper"
_lowerCamelCase =["past_key_values"]
_lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[Any] , a__ : Any=51865 , a__ : Any=80 , a__ : Dict=6 , a__ : int=4 , a__ : int=6 , a__ : str=4 , a__ : int=1536 , a__ : Optional[Any]=1536 , a__ : str=0.0 , a__ : Optional[int]=0.0 , a__ : Optional[int]=50257 , a__ : int=True , a__ : Optional[int]=True , a__ : str="gelu" , a__ : List[str]=256 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=0.0 , a__ : str=0.02 , a__ : str=False , a__ : int=1500 , a__ : Tuple=448 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Tuple=None , a__ : List[Any]=[220, 50256] , a__ : Optional[int]=False , a__ : Optional[Any]=256 , a__ : Any=False , a__ : int=0.05 , a__ : Optional[Any]=10 , a__ : Dict=2 , a__ : Optional[Any]=0.0 , a__ : Tuple=10 , a__ : Any=0 , a__ : str=7 , **a__ : Any , ):
UpperCAmelCase = vocab_size
UpperCAmelCase = num_mel_bins
UpperCAmelCase = d_model
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
UpperCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
UpperCAmelCase = median_filter_width
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def __snake_case ( self : List[str] ):
UpperCAmelCase = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase = {0: '''batch'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction='''inputs''' )
return common_inputs
def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ):
UpperCAmelCase = OrderedDict()
UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , )
UpperCAmelCase = encoder_inputs['''input_features'''].shape[2]
UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , a__ , a__ , a__ , a__ )
UpperCAmelCase = encoder_inputs.pop('''input_features''' )
UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def __snake_case ( self : Dict ):
return 1e-3
| 51 | 0 |
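A minimal sketch of instantiating the configuration class defined above with toy sizes (real checkpoints such as openai/whisper-base ship their own config.json, so this is for illustration only):
from transformers import WhisperConfig

config = WhisperConfig(
    vocab_size=51865,
    num_mel_bins=80,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    d_model=256,
)
assert config.model_type == "whisper"
assert config.max_source_positions == 1500  # default taken from the signature above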
"""simple docstring"""
from math import log2
def lowerCamelCase_ (UpperCamelCase__ : int ):
    # Count the trailing zero bits of a non-negative integer: the index of the
    # lowest set bit, with 0 returned for an input of 0.
    if UpperCamelCase__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(UpperCamelCase__ , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (UpperCamelCase__ == 0) else int(log2(UpperCamelCase__ & -UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506 |
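A worked example of the bit trick above: for n = 36 (0b100100), n & -n isolates the lowest set bit (0b100 = 4) and log2(4) = 2, i.e. two trailing zeros. A plainly named restatement:
from math import log2

def binary_count_trailing_zeros(n: int) -> int:
    # Same computation as the function above, without the obfuscated names.
    return 0 if n == 0 else int(log2(n & -n))

assert binary_count_trailing_zeros(36) == 2  # 0b100100
assert binary_count_trailing_zeros(8) == 3   # 0b1000
assert binary_count_trailing_zeros(0) == 0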
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =LEDConfig
_lowerCamelCase ={}
_lowerCamelCase ="gelu"
def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ )
UpperCAmelCase = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
UpperCAmelCase = global_attention_mask
return config, inputs_dict
def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ):
UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ )
UpperCAmelCase, UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1e-3 )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , ) -> Dict:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase =(
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase =True
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = TFLEDModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ )
def __snake_case ( self : int ):
self.config_tester.run_common_tests()
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCAmelCase = 2
UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCAmelCase = True
UpperCAmelCase = self.model_tester.seq_length
UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Tuple ):
UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : int ):
UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
"""simple docstring"""
return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.intaa )
a__ : int = 1e-4
@slow
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 )
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
| 51 | 0 |
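The tester above derives encoder_seq_length by padding seq_length up to the next multiple of attention_window; a standalone sketch of that padding rule:
def padded_encoder_length(seq_length: int, attention_window: int) -> int:
    # Round up to the next multiple of attention_window (no-op when already aligned).
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

assert padded_encoder_length(7, 4) == 8
assert padded_encoder_length(8, 4) == 8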
from __future__ import annotations
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
    # True exactly when no element of the sequence repeats.
    return len(set(_SCREAMING_SNAKE_CASE ) ) == len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 235 |
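A readable restatement and usage of the uniqueness check above (a set drops duplicates, so equal lengths mean no element repeats):
def all_elements_unique(items) -> bool:
    return len(set(items)) == len(items)

assert all_elements_unique([1, 2, 3])
assert not all_elements_unique([1, 2, 2])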
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
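The XLNet module above hands its public names to _LazyModule so that heavy backends are imported only on first attribute access; a tiny illustration of the idea (a sketch of the pattern, not the transformers implementation):
import importlib

class TinyLazyModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name: str):
        submodule = importlib.import_module(f"{self._package}.{self._name_to_module[name]}")
        return getattr(submodule, name)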
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> YolosConfig:
_UpperCAmelCase : Optional[int] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_UpperCAmelCase : Dict = 192
_UpperCAmelCase : Tuple = 768
_UpperCAmelCase : Any = 12
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Any = [800, 1333]
_UpperCAmelCase : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
_UpperCAmelCase : int = 330
_UpperCAmelCase : Any = 14
_UpperCAmelCase : List[str] = 6
_UpperCAmelCase : Optional[int] = 1320
elif "yolos_s" in yolos_name:
_UpperCAmelCase : str = 384
_UpperCAmelCase : Optional[Any] = 1536
_UpperCAmelCase : List[Any] = 12
_UpperCAmelCase : Optional[int] = 6
elif "yolos_b" in yolos_name:
_UpperCAmelCase : Union[str, Any] = [800, 1344]
_UpperCAmelCase : Optional[Any] = 91
_UpperCAmelCase : Any = "huggingface/label-files"
_UpperCAmelCase : Optional[int] = "coco-detection-id2label.json"
_UpperCAmelCase : Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Union[str, Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_UpperCAmelCase : Dict = idalabel
_UpperCAmelCase : str = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: dict , lowerCAmelCase: YolosConfig , lowerCAmelCase: bool = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : Union[str, Any] = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_UpperCAmelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = in_proj_weight[: config.hidden_size, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : List[Any] = in_proj_weight[-config.hidden_size :, :]
_UpperCAmelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> str:
if "backbone" in name:
_UpperCAmelCase : List[str] = name.replace("backbone" , "vit" )
if "cls_token" in name:
_UpperCAmelCase : Any = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
_UpperCAmelCase : Tuple = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
_UpperCAmelCase : Optional[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
_UpperCAmelCase : List[str] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_UpperCAmelCase : Optional[Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
_UpperCAmelCase : List[Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
_UpperCAmelCase : List[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_UpperCAmelCase : Dict = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_UpperCAmelCase : int = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_UpperCAmelCase : str = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
_UpperCAmelCase : str = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
_UpperCAmelCase : Optional[Any] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
_UpperCAmelCase : List[str] = name.replace("vit.norm" , "vit.layernorm" )
return name
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: dict , lowerCAmelCase: YolosForObjectDetection ) -> dict:
for key in orig_state_dict.copy().keys():
_UpperCAmelCase : Any = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
_UpperCAmelCase : Dict = key.split("." )
_UpperCAmelCase : List[Any] = int(key_split[2] )
_UpperCAmelCase : List[str] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_UpperCAmelCase : Optional[int] = val[:dim, :]
_UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
_UpperCAmelCase : List[Any] = val[-dim:, :]
else:
_UpperCAmelCase : Any = val[:dim]
_UpperCAmelCase : Tuple = val[dim : dim * 2]
_UpperCAmelCase : List[Any] = val[-dim:]
else:
_UpperCAmelCase : Optional[int] = val
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( ) -> torch.Tensor:
_UpperCAmelCase : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: bool = False ) -> str:
_UpperCAmelCase : Tuple = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
_UpperCAmelCase : str = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
# load 🤗 model
_UpperCAmelCase : Union[str, Any] = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
_UpperCAmelCase : int = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
_UpperCAmelCase : str = 800 if yolos_name != "yolos_ti" else 512
_UpperCAmelCase : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_UpperCAmelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase , _UpperCAmelCase : List[str] = outputs.logits, outputs.pred_boxes
_UpperCAmelCase , _UpperCAmelCase : str = None, None
if yolos_name == "yolos_ti":
_UpperCAmelCase : Tuple = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
_UpperCAmelCase : str = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
_UpperCAmelCase : Dict = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
_UpperCAmelCase : int = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
_UpperCAmelCase : Any = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
_UpperCAmelCase : int = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
_UpperCAmelCase : Optional[int] = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
_UpperCAmelCase : Dict = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
_UpperCAmelCase : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="hustvl" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="hustvl" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 300 |
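Both copies of the conversion script fetch the COCO id-to-label mapping the same way; a standalone sketch of that lookup, with the repo id and filename taken from the script:
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download("huggingface/label-files", "coco-detection-id2label.json", repo_type="dataset")
with open(path, "r") as f:
    id2label = {int(k): v for k, v in json.load(f).items()}
label2id = {v: k for k, v in id2label.items()}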
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a__ : List[Any] = 'sshleifer/mar_enro_6_3_student'
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __snake_case ( self : Dict ):
super().setUp()
UpperCAmelCase = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , )
UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __snake_case ( self : Optional[int] ):
MarianMTModel.from_pretrained(a__ )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
        env_vars_to_replace = {
            '''$MAX_LEN''': 64,
            '''$BS''': 64,
            '''$GAS''': 1,
            '''$ENRO_DIR''': self.data_dir,
            '''facebook/mbart-large-cc25''': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
            '''--num_train_epochs 6''': '''--num_train_epochs 1''',
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n            --output_dir {output_dir}\n            --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n            --sortish_sampler\n            --do_predict\n            --gpus 1\n            --freeze_encoder\n            --n_train 40000\n            --n_val 500\n            --n_test 500\n            --fp16_opt_level O1\n            --num_sanity_val_steps 0\n            --eval_beams 2\n        ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(sys , '''argv''' , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
        self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
        self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
        # a huge gen time would mean the model is hanging on generate; maybe a bad config was saved
        self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith('''.ckpt''' )][0]
        ckpt_path = os.path.join(args.output_dir , ckpt_name )
        ckpt = torch.load(ckpt_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
    '''simple docstring'''
    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script( self ):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            '''--fp16_opt_level=O1''': '''''',
            '''$MAX_LEN''': 128,
            '''$BS''': 16,
            '''$GAS''': 1,
            '''$ENRO_DIR''': data_dir,
            '''$m''': '''sshleifer/student_marian_en_ro_6_1''',
            '''val_check_interval=0.25''': '''val_check_interval=1.0''',
        }
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        bash_script = bash_script.replace('''--fp16 ''' , ''' ''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('''--fp16''' , '''''' )
        epochs = 6
        testargs = (
            ['''distillation.py''']
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                '''--gpus=1''',
                '''--learning_rate=1e-3''',
                f"--num_train_epochs={epochs}",
                '''--warmup_steps=10''',
                '''--val_check_interval=1.0''',
                '''--do_predict''',
            ]
        )
        with patch.object(sys , '''argv''' , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
        assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith('''.ckpt''' )][0]
        ckpt_path = os.path.join(args.output_dir , ckpt_name )
        ckpt = torch.load(ckpt_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test'''] ) == 1
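# Aside (added sketch): the sys.argv patching pattern the tests above rely on,
# in isolation. Self-contained and independent of transformers.
import argparse
import sys
from unittest.mock import patch
def _cli_epochs() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=1)
    return parser.parse_args().epochs
with patch.object(sys, "argv", ["prog", "--epochs", "3"]):
    assert _cli_epochs() == 3  # the patched argv is what argparse sees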
| 51 | 0 |
"""simple docstring"""
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def __snake_case ( __A : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = botoa.client('iam' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=SCREAMING_SNAKE_CASE_ , AssumeRolePolicyDocument=json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) )
SCREAMING_SNAKE_CASE : Optional[Any] = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=SCREAMING_SNAKE_CASE_ , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def __snake_case ( __A : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = botoa.client('iam' )
return iam_client.get_role(RoleName=SCREAMING_SNAKE_CASE_ )["Role"]["Arn"]
def __snake_case ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE : List[Any] = None
if credentials_configuration == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
SCREAMING_SNAKE_CASE : Any = aws_profile
else:
print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
SCREAMING_SNAKE_CASE : List[Any] = _ask_field('AWS Access Key ID: ' )
SCREAMING_SNAKE_CASE : Any = aws_access_key_id
SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field('AWS Secret Access Key: ' )
SCREAMING_SNAKE_CASE : List[Any] = aws_secret_access_key
SCREAMING_SNAKE_CASE : str = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
SCREAMING_SNAKE_CASE : Dict = aws_region
SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , SCREAMING_SNAKE_CASE_ , )
if role_management == 0:
SCREAMING_SNAKE_CASE : str = _ask_field('Enter your IAM role name: ' )
else:
SCREAMING_SNAKE_CASE : Tuple = 'accelerate_sagemaker_execution_role'
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[int] = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE : Dict = None
if is_custom_docker_image:
SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field('Enter your Docker image: ' , lambda __A : str(SCREAMING_SNAKE_CASE_ ).lower() )
SCREAMING_SNAKE_CASE : Dict = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE : int = None
if is_sagemaker_inputs_enabled:
SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda __A : str(SCREAMING_SNAKE_CASE_ ).lower() , )
SCREAMING_SNAKE_CASE : Any = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE : Tuple = None
if is_sagemaker_metrics_enabled:
SCREAMING_SNAKE_CASE : int = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda __A : str(SCREAMING_SNAKE_CASE_ ).lower() , )
SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : int = _ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
if use_dynamo:
SCREAMING_SNAKE_CASE : Dict = 'dynamo_'
SCREAMING_SNAKE_CASE : List[Any] = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
if use_custom_options:
SCREAMING_SNAKE_CASE : Optional[Any] = _ask_options(
'Which mode do you want to use?' , SCREAMING_SNAKE_CASE_ , lambda __A : TORCH_DYNAMO_MODES[int(SCREAMING_SNAKE_CASE_ )] , default='default' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
    eca_instance_query = 'Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
SCREAMING_SNAKE_CASE : Tuple = _ask_options(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lambda __A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(SCREAMING_SNAKE_CASE_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(SCREAMING_SNAKE_CASE_ , lambda __A : str(SCREAMING_SNAKE_CASE_ ).lower() , default='ml.p3.2xlarge' )
SCREAMING_SNAKE_CASE : str = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
SCREAMING_SNAKE_CASE : Tuple = _ask_field(
'How many machines do you want use? [1]: ' , SCREAMING_SNAKE_CASE_ , default=1 , )
SCREAMING_SNAKE_CASE : Dict = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=SCREAMING_SNAKE_CASE_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=SCREAMING_SNAKE_CASE_ , use_cpu=SCREAMING_SNAKE_CASE_ , dynamo_config=SCREAMING_SNAKE_CASE_ , eca_instance_type=SCREAMING_SNAKE_CASE_ , profile=SCREAMING_SNAKE_CASE_ , region=SCREAMING_SNAKE_CASE_ , iam_role_name=SCREAMING_SNAKE_CASE_ , mixed_precision=SCREAMING_SNAKE_CASE_ , num_machines=SCREAMING_SNAKE_CASE_ , sagemaker_inputs_file=SCREAMING_SNAKE_CASE_ , sagemaker_metrics_file=SCREAMING_SNAKE_CASE_ , )
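# Aside (hedged sketch): the IAM role-creation pattern above against the real
# boto3 client ("botoa" in this dump is a garbled alias of boto3). Running it
# needs valid AWS credentials; the role name is whatever the caller chooses.
import json
import boto3
def create_sagemaker_execution_role(role_name: str) -> None:
    iam = boto3.client("iam")
    trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role with the SageMaker trust policy attached
        iam.create_role(RoleName=role_name, AssumeRolePolicyDocument=json.dumps(trust_policy, indent=2))
    except iam.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists, reusing it")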
| 265 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
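# Aside (hedged sketch): how a combined image/text processor like the one above
# is used. "BAAI/AltCLIP" is the public checkpoint this processor class was
# written for; the exact repo id is an assumption here.
from PIL import Image
from transformers import AltCLIPProcessor
processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.new("RGB", (224, 224), "white")
enc = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(enc.keys()))  # attention_mask, input_ids, pixel_values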
| 51 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def A_ ( self : str ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCamelCase__ : Optional[Any] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Tuple = TFAutoModel.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Tuple = AutoModel.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : Optional[Any] ) -> Dict:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : List[Any] = TFAutoModelForPreTraining.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Any = AutoModelForPreTraining.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : Dict ) -> List[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(a__ , from_pt=a__ )
lowerCamelCase__ , lowerCamelCase__ : str = TFAutoModelForCausalLM.from_pretrained(
a__ , output_loading_info=a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(a__ , from_tf=a__ )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained(
a__ , output_loading_info=a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : Any ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : Optional[int] ) -> str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(a__ , from_pt=a__ )
lowerCamelCase__ , lowerCamelCase__ : Tuple = TFAutoModelForMaskedLM.from_pretrained(
a__ , output_loading_info=a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Optional[Any] = AutoModelForMaskedLM.from_pretrained(a__ , from_tf=a__ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = AutoModelForMaskedLM.from_pretrained(
a__ , output_loading_info=a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : List[str] ) -> Any:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(a__ , from_pt=a__ )
lowerCamelCase__ , lowerCamelCase__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
a__ , output_loading_info=a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : str = AutoModelForSeqaSeqLM.from_pretrained(a__ , from_tf=a__ )
lowerCamelCase__ , lowerCamelCase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(
a__ , output_loading_info=a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : Tuple ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCamelCase__ : str = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : int = AutoModelForSequenceClassification.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def A_ ( self : List[Any] ) -> List[str]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(a__ , from_pt=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
lowerCamelCase__ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(a__ , from_tf=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
    def test_from_pretrained_identifier(self) -> None:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_identifier_from_model_type(self) -> None:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
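# Aside (added sketch): the PT<->TF cross-loading exercised above, in isolation.
# Requires both torch and tensorflow; uses a tiny public test checkpoint.
from transformers import AutoModel, TFAutoModel
pt_model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
tf_model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True)
print(type(pt_model).__name__, type(tf_model).__name__)  # BertModel TFBertModel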
| 295 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
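# Aside (hedged sketch): loading the pipeline exported above from the public
# UnCLIP checkpoint. "kakaobrain/karlo-v1-alpha" is the checkpoint UnCLIPPipeline
# ships with; a CUDA GPU is assumed.
import torch
from diffusers import UnCLIPPipeline
pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a red bicycle leaning against a wall").images[0]
image.save("unclip_sample.png")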
| 51 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Tuple=13 , snake_case__ : Optional[int]=10 , snake_case__ : Any=3 , snake_case__ : Tuple=2 , snake_case__ : Tuple=2 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[int]=True , snake_case__ : Dict=True , snake_case__ : int=32 , snake_case__ : List[str]=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Tuple=10 , snake_case__ : Dict=0.02 , snake_case__ : Dict=0.9 , snake_case__ : int=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = tubelet_size
lowerCAmelCase__ = num_frames
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = mask_ratio
lowerCAmelCase__ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCAmelCase__ = int(mask_ratio * self.seq_length )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Any ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Dict ):
lowerCAmelCase__ = VideoMAEModel(config=a__ )
model.to(a__ )
model.eval()
lowerCAmelCase__ = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : int ):
lowerCAmelCase__ = VideoMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase__ = torch.ones((self.num_masks,) )
lowerCAmelCase__ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCAmelCase__ = mask.expand(self.batch_size , -1 ).bool()
lowerCAmelCase__ = model(a__ , a__ )
# model only returns predictions for masked patches
lowerCAmelCase__ = mask.sum().item()
lowerCAmelCase__ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
UpperCamelCase_ : Tuple = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : int = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Any = False
UpperCamelCase_ : Dict = False
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = VideoMAEModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : int=False ):
lowerCAmelCase__ = copy.deepcopy(a__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase__ = torch.ones((self.model_tester.num_masks,) )
lowerCAmelCase__ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCAmelCase__ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCAmelCase__ = bool_masked_pos.to(a__ )
if return_labels:
if model_class in [
*get_values(a__ ),
]:
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(a__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = VideoMAEModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
if not self.has_attentions:
pass
else:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase__ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a__ , a__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a__ , a__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCAmelCase__ = len(a__ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + 1 , len(a__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _SCREAMING_SNAKE_CASE ( self : int ):
def check_hidden_states_output(snake_case__ : int , snake_case__ : str , snake_case__ : Dict ):
lowerCAmelCase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a__ , a__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a__ ) , a__ )
lowerCAmelCase__ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(a__ , a__ , a__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
pass
def prepare_video():
    """simple docstring"""
    file_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file_path )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
a__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_video()
lowerCAmelCase__ = image_processor(a__ , return_tensors="""pt""" ).to(a__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**a__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , a__ )
lowerCAmelCase__ = torch.tensor([0.3669, -0.0688, -0.2421] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(a__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_video()
lowerCAmelCase__ = image_processor(a__ , return_tensors="""pt""" ).to(a__ )
# add boolean mask, indicating which patches to mask
lowerCAmelCase__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
lowerCAmelCase__ = torch.load(a__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**a__ )
# verify the logits
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=a__ )
self.assertEqual(outputs.logits.shape , a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCAmelCase__ = torch.tensor([0.5142] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCAmelCase__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=a__ ).to(
a__ )
with torch.no_grad():
lowerCAmelCase__ = model(**a__ )
        lowerCAmelCase__ = torch.tensor([0.6469] , device=a__ )
self.assertTrue(torch.allclose(outputs.loss , a__ , atol=1E-4 ) )
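# Aside (added sketch): the bool_masked_pos construction repeated in the tests
# above, in isolation. Numbers match a 224x224, 16-frame VideoMAE input
# (patch 16, tubelet 2): (224/16)^2 * (16/2) = 1568 tokens.
import torch
seq_length, mask_ratio, batch_size = 1568, 0.9, 2
num_masks = int(mask_ratio * seq_length)  # 1411 masked tokens
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape, int(bool_masked_pos[0].sum()))  # torch.Size([2, 1568]) 1411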
| 644 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
pass
def hashimage(image: Image ) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(image: Image ) -> Dict:
    """simple docstring"""
    npimg = np.array(image )
    shape = npimg.shape
    return {"hash": hashimage(image ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCamelCase =dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Dict , a__ : int ):
UpperCAmelCase = MaskGenerationPipeline(model=a__ , image_processor=a__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __snake_case ( self : int , a__ : Dict , a__ : Tuple ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : str ):
pass
@slow
@require_torch
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
UpperCAmelCase = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : Dict ):
UpperCAmelCase = '''facebook/sam-vit-huge'''
UpperCAmelCase = pipeline('''mask-generation''' , model=a__ )
UpperCAmelCase = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
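# Aside (hedged sketch): invoking the mask-generation pipeline exercised above
# outside the test harness. Running "facebook/sam-vit-huge" for real needs a
# GPU and a multi-GB download.
from transformers import pipeline
generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
print(len(outputs["masks"]), outputs["scores"][:3])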
| 51 | 0 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total) -> None:
        """simple docstring"""
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no) -> int:
        """simple docstring"""
        # if all persons have been assigned a task, this arrangement counts once
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed) -> int:
        """simple docstring"""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
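# Aside (added sketch): the same count with functools.lru_cache replacing the
# manual DP table; equivalent on the instance above (both print 10).
from functools import lru_cache
def count_ways(task_performed: list[list[int]], total_tasks: int) -> int:
    final_mask = (1 << len(task_performed)) - 1
    people_for_task: dict[int, list[int]] = {}
    for person, tasks in enumerate(task_performed):
        for t in tasks:
            people_for_task.setdefault(t, []).append(person)
    @lru_cache(maxsize=None)
    def go(mask: int, task_no: int) -> int:
        if mask == final_mask:
            return 1
        if task_no > total_tasks:
            return 0
        ways = go(mask, task_no + 1)  # leave this task unassigned
        for p in people_for_task.get(task_no, []):
            if not mask & (1 << p):
                ways += go(mask | (1 << p), task_no + 1)
        return ways
    return go(0, 1)
assert count_ways([[1, 3, 4], [1, 2, 5], [3, 4]], 5) == 10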
| 154 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["pixel_values"]
def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ):
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ):
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
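# Aside (added sketch): the three input shapes the make_batched helper above
# normalizes to a batch of videos, mirrored in a standalone function (the
# is_valid_image checks are dropped here for brevity).
import numpy as np
def _make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos  # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]  # a single video (list of frames)
    return [[videos]]  # a single frame
frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(_make_batched(frame)) == 1
assert len(_make_batched([frame, frame])) == 1
assert len(_make_batched([[frame], [frame]])) == 2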
| 51 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
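# Aside (hedged sketch): using the tokenizer exported above.
# "studio-ousia/mluke-base" is the public mLUKE checkpoint; entity_spans are
# character offsets into the text.
from transformers import MLukeTokenizer
tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
text = "ISO 639-3 uses the code fas for the dialects spoken across Iran."
enc = tokenizer(text, entity_spans=[(0, 9)], return_tensors="pt")  # span of "ISO 639-3"
print(enc["input_ids"].shape, enc["entity_ids"].shape)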
| 342 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token's probability of being an entity start/end."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # encode queries and supports with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
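if __name__ == "__main__":
    # A minimal, self-contained sketch of the scoring step in isolation.
    # Shapes are illustrative assumptions, not the real FSNER input pipeline.
    seq_len, hidden, n_start = 16, 768, 3
    q_i = torch.randn(seq_len, hidden)      # query-token embeddings for one example
    s_start = torch.randn(n_start, hidden)  # embeddings of start-marker tokens in the supports
    p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)
    print(p_start.shape)  # torch.Size([16]) -- one start probability per query token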
| 51 | 0 |
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    """Breadth-first search that records each node's parent in the BFS tree."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 62 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 51 | 0 |
def dfs(u, graph, visited_edge, path=None):
    """Depth-first walk that consumes each undirected edge exactly once."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return (1, _) for an Euler circuit, (2, odd_node) for an Euler path, (3, _) otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
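# Quick sanity check of check_circuit_or_path (derived from the function above):
# two nodes joined by one edge both have odd degree, so an Euler *path* exists.
#   check_circuit_or_path({1: [2], 2: [1]}, 10)  # -> (2, 2)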
| 101 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    """Arguments for plotting benchmark CSV results."""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
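# Hypothetical invocation (file names are illustrative; the CSV is expected to
# come from the transformers benchmark utilities, with columns
# model, batch_size, sequence_length and result):
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file plot.png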
| 51 | 0 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
a__ : List[str] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
a__ : List[Any] =parser.parse_args()
main(args)
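# Hypothetical invocation (paths are illustrative):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model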
| 399 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the HF MobileViTV2 structure."""
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
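# Hypothetical invocation (the script and file names are illustrative assumptions):
#   python convert_mlcvnets_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256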
| 51 | 0 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 506 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
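if __name__ == "__main__":
    # Minimal usage sketch: the defaults above correspond to the 345M-parameter
    # Megatron-BERT layout (24 layers, hidden size 1024, 16 heads).
    config = MegatronBertConfig()
    print(config.hidden_size, config.num_attention_heads, config.num_hidden_layers)  # 1024 16 24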
| 51 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
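# A hedged sketch of how this trainer is typically wired up in the example QA
# scripts (argument names follow the usual run_qa.py conventions):
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()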
| 235 |
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    """Breadth-first search that records each node's parent in the BFS tree."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 51 | 0 |
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 300 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    The sample tree:
        1
       / \
      2   3
     / \
    4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Alternate left-to-right and right-to-left, one tree level at a time."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()

    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
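# Expected results for make_tree() (derived from the functions above):
#   inorder     -> [4, 2, 5, 1, 3]
#   preorder    -> [1, 2, 4, 5, 3]
#   postorder   -> [4, 5, 2, 3, 1]
#   height      -> 3
#   level_order -> [1, 2, 3, 4, 5]
#   zigzag      -> [[1], [3, 2], [4, 5]]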
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 295 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (factorial of num)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
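# Quick check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
#   solution(10)  # -> 27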
| 51 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with a 0/1 weight."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency-list graph backing the 0-1 BFS below."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving the given vertex."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: zero-weight edges go to the front of the deque and unit-weight
        edges to the back, so vertices are settled in nondecreasing distance."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
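# A minimal usage sketch (not part of the original module): on this three-vertex
# graph the zero-weight detour 0 -> 2 -> 1 beats the direct unit-weight edge
# 0 -> 1, so the shortest path has length 0. Call `_demo()` to verify.
def _demo() -> None:
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 1)
    graph.add_edge(0, 2, 0)
    graph.add_edge(2, 1, 0)
    assert graph.get_shortest_path(0, 1) == 0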
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644 |
'''Tests for the StableUnCLIP text-to-image pipeline.'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the class embeddings are the noise-augmented image embeddings concatenated
            # with the noise-level embeddings of the same dimension, hence the * 2
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 51 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward audio to the feature extractor and/or text to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad audio features and/or label sequences."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily target the tokenizer when processing label text."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
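# A minimal usage sketch (not part of the original file; the checkpoint name and
# the 16 kHz waveform are illustrative assumptions, not tested here):
#
#     from transformers import MCTCTProcessor
#
#     processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="a transcript", return_tensors="pt").input_ids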
| 154 |
'''Check whether a singly linked list is a palindrome, three different ways.'''


def is_palindrome(head) -> bool:
    """Reverse the second half in place and compare it with the first half."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Push the second half onto a stack and pop while walking from the head."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Record the positions of each value; a palindrome pairs them symmetrically."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
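# The checks above assume a minimal singly linked list node exposing `val` and
# `next`; neither the node class below nor the quick self-test is in the
# original file.
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None


if __name__ == "__main__":

    def build(values):
        """Build a linked list from a Python list and return its head."""
        head = tail = None
        for v in values:
            node = Node(v)
            if head is None:
                head = tail = node
            else:
                tail.next = node
                tail = node
        return head

    assert is_palindrome_stack(build([1, 2, 2, 1])) is True
    assert is_palindrome_stack(build([1, 2, 3])) is False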
| 51 | 0 |
'''Image/Text processor class for CLIP.'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Forward ``text`` to the tokenizer and ``images`` to the image processor."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
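# A minimal usage sketch (not part of the original file; the checkpoint name and
# image path are illustrative):
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(
#         text=["a photo of a cat"], images=Image.open("cat.png"),
#         return_tensors="pt", padding=True,
#     )
#     # batch holds input_ids / attention_mask from the tokenizer plus
#     # pixel_values from the image processor.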
| 342 |
'''Image/Text processor class for CLIP.'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Forward ``text`` to the tokenizer and ``images`` to the image processor."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 51 | 0 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
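# The rule pinned down by the tests above: every `.bin` (or `.fp16.bin`) weight
# file must have a `.safetensors` sibling for the repo to count as compatible.
# Illustration with hypothetical file listings:
#
#     is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])  # False
#     is_safetensors_compatible(
#         ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
#     )  # True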
| 62 |
'''Tests for the XGLM tokenizers.'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False,
        )
| 51 | 0 |
def min_path_sum(grid: list) -> int:
    """Find the cost of the cheapest path through the grid, moving only right or
    down; the grid is accumulated in place, row by row."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate the cheapest reachable cost into each cell of ``current_row``."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
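    # Added sanity check (not part of the original script): on the classic 3x3
    # grid the cheapest path is 1 -> 3 -> 1 -> 1 -> 1, for a total cost of 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7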
| 101 |
'''Convert YOLOS checkpoints from the original repository (https://github.com/hustvl/YOLOS) to the 🤗 format.'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    """Load the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    """Copy/paste/tweak the original YOLOS weights into our YOLOS structure."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__ : Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
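# Example invocation (the script filename and checkpoint path are illustrative;
# the flags are exactly the ones defined by the parser above):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small \
#         --push_to_hub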
| 51 | 0 |
'''Megatron-BERT model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self, vocab_size=29_056, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
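# A minimal usage sketch (not part of the original module): the defaults above
# reproduce the released 345M Megatron-BERT layout (24 layers, hidden size 1024,
# 16 attention heads).
#
#     config = MegatronBertConfig()
#     tiny = MegatronBertConfig(hidden_size=32, num_hidden_layers=2,
#                               num_attention_heads=2, intermediate_size=37)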
| 399 |
'''Whisper model configuration.'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1_500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256,
        eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
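# A minimal usage sketch (not part of the original module):
#
#     config = WhisperConfig()                 # library defaults, see __init__ above
#     onnx_config = WhisperOnnxConfig(config)
#     print(onnx_config.inputs)                # axes for "input_features" / "decoder_input_ids"
#     print(onnx_config.atol_for_validation)   # 1e-3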
| 51 | 0 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """
    Class to crawl an Instagram user's public profile information.
    """

    def __init__(self, username):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(\'{self.username}\')'

    def __str__(self) -> str:
        return f'{self.fullname} ({self.username}) is {self.biography}'

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 12_0000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser('github')
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 506 |
'''Tests for the TensorFlow LED model.'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented
        pass
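# --- Hedged example (added for illustration; not part of the original test file) ---
# LED's global_attention_mask, as built in test_attention_outputs above, marks the
# first num_global_attn_indices positions as global tokens:
def _demo_global_attention_mask():
    seq_length, num_global_attn_indices = 5, 2
    base = tf.zeros((1, seq_length), dtype=tf.int32)
    mask = tf.where(tf.range(seq_length)[None, :] < num_global_attn_indices, 1, base)
    assert mask.numpy().tolist() == [[1, 1, 0, 0, 0]]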
def _long_tensor(tok_lst):
    """Wrap a nested token list in a tf.int32 constant."""
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4  # the original constant name was lost; "TOLERANCE" is assumed
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
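# --- Hedged example (added for illustration; not part of the original test file) ---
# Both integration tests above follow the "expected slice" recipe: run the
# pretrained model on fixed ids, then pin a tiny corner of the output tensor.
def _demo_expected_slice_check():
    import numpy as np

    output = np.arange(12.0).reshape(1, 4, 3)   # stand-in for a model output
    expected_slice = output[:, :3, :3].copy()   # values recorded once, then frozen
    np.testing.assert_allclose(output[:, :3, :3], expected_slice, atol=1e-3)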
| 51 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
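# --- Hedged example (added for illustration; not part of the original script) ------
# The Normalize() stats above are the CLIP image statistics; each channel is mapped
# as (pixel - mean) / std. Quick numeric check for the red channel:
def _demo_clip_normalize():
    mean, std = 0.48145466, 0.26862954
    pixel = 1.0  # a fully saturated value after ToTensor()
    assert abs((pixel - mean) / std - 1.9303) < 1e-3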
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
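# --- Hedged example (added for illustration; not part of the original script) ------
# rename_key() chains the substitutions above, e.g. for an original BLIP ViT name:
def _demo_rename_key():
    src = "visual_encoder.blocks.0.attn.proj.weight"
    assert rename_key(src) == "vision_model.encoder.layers.0.self_attn.projection.weight"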
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Copy the original BLIP weights into the HF BLIP layout and sanity-check them."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
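# --- Hedged example (added for illustration; not part of the original script) ------
# The pop/rename/reinsert loop used three times above, shown on a plain dict:
def _demo_rename_state_dict():
    state = {"visual_encoder.blocks.0.norm1.weight": 0}
    for key in state.copy():
        value = state.pop(key)
        state[rename_key(key)] = value
    assert list(state) == ["vision_model.encoder.layers.0.layer_norm1.weight"]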
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 235 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
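# --- Hedged example (added for illustration; not part of the original module) ------
# The pattern above defers heavy imports: the module object is swapped for a lazy
# proxy that resolves names from _import_structure on first attribute access.
# Minimal stand-in (the importlib machinery is elided):
def _demo_lazy_module():
    import types

    class _Lazy(types.ModuleType):
        def __init__(self, name, structure):
            super().__init__(name)
            self._structure = structure

        def __getattr__(self, item):
            for submodule, names in self._structure.items():
                if item in names:
                    return f"{self.__name__}.{submodule}:{item}"
            raise AttributeError(item)

    mod = _Lazy("xlnet", {"configuration_xlnet": ["XLNetConfig"]})
    assert mod.XLNetConfig == "xlnet.configuration_xlnet:XLNetConfig"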
| 51 | 0 |
from manim import *
class Stage3(Scene):  # manim Scene subclass; the class name "Stage3" is an assumption
    def construct(self):
_UpperCAmelCase : Any = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : str = [mem.copy() for i in range(6 )]
_UpperCAmelCase : List[Any] = VGroup(*a__ ).arrange(a__ , buff=0 )
_UpperCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 )
_UpperCAmelCase : Dict = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("CPU" , font_size=24 )
_UpperCAmelCase : Dict = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(4 )]
_UpperCAmelCase : Any = VGroup(*a__ ).arrange(a__ , buff=0 )
_UpperCAmelCase : Any = Text("GPU" , font_size=24 )
_UpperCAmelCase : Any = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*a__ ).arrange(a__ , buff=0 )
_UpperCAmelCase : Optional[Any] = Text("Model" , font_size=24 )
_UpperCAmelCase : Optional[Any] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_UpperCAmelCase : Optional[Any] = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCAmelCase : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
cpu_targs.append(a__ )
_UpperCAmelCase : int = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*a__ ).arrange(a__ , buff=0 )
_UpperCAmelCase : Tuple = Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase : Optional[Any] = Group(a__ , a__ ).arrange(a__ , aligned_edge=a__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCAmelCase : Dict = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Dict = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_UpperCAmelCase : Any = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCAmelCase : str = MarkupText(
f'Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ) , Write(a__ ) )
self.play(Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : List[str] = []
for i, rect in enumerate(a__ ):
_UpperCAmelCase : Dict = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
first_animations.append(GrowFromCenter(a__ , run_time=1 ) )
_UpperCAmelCase : Union[str, Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(*a__ )
self.wait()
| 300 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        # Warm the cache so later tests do not include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
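# --- Hedged example (added for illustration; not part of the original test file) ---
# The test above turns a documented shell command into argv: strip the script name,
# substitute placeholders, then split on whitespace.
def _demo_bash_script_templating():
    bash_script = "finetune.py --data_dir $ENRO_DIR --train_batch_size $BS \\\n --fp16"
    cli = bash_script.split("finetune.py")[1].strip().replace("\\\n", "")
    for placeholder, value in {"$ENRO_DIR": "/tmp/enro", "$BS": 64}.items():
        cli = cli.replace(placeholder, str(value))
    assert cli.split() == ["--data_dir", "/tmp/enro", "--train_batch_size", "64", "--fp16"]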
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
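# --- Hedged example (added for illustration; not part of the original test file) ---
# Both tests drive argparse-based entry points by patching sys.argv:
def _demo_patch_argv():
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int)
    with patch.object(sys, "argv", ["prog", "--epochs", "3"]):
        assert parser.parse_args().epochs == 3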
| 51 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the block arrangements of Project Euler 114 via dynamic programming."""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
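# --- Hedged check (added for illustration; not part of the original solution) ------
# Project Euler 114's worked example: a row of length 7 admits exactly 17
# arrangements, which this dynamic programme reproduces.
def _demo_small_case():
    assert solution(7) == 17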
| 265 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    # The class name is inferred from the CLIPImageProcessor/XLMRobertaTokenizer pairing.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
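# --- Hedged example (added for illustration; not part of the original module) ------
# Typical usage: text goes through the tokenizer, images through the image
# processor, and the outputs are merged. The checkpoint name below is hypothetical.
#
#   processor = AltCLIPProcessor.from_pretrained("some/altclip-checkpoint")
#   batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   sorted(batch.keys())  # ["attention_mask", "input_ids", "pixel_values"]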
| 51 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_UpperCAmelCase : Dict = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_UpperCAmelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_UpperCAmelCase : str = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_UpperCAmelCase : Dict = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_UpperCAmelCase : Any = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_UpperCAmelCase : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_UpperCAmelCase : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_UpperCAmelCase : str = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_UpperCAmelCase : int = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_UpperCAmelCase : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_UpperCAmelCase : Any = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_UpperCAmelCase : List[Any] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_UpperCAmelCase : List[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 295 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 51 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
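# --- Hedged example (added for illustration; not part of the original test file) ---
# The multiple-choice test above replicates each sequence num_choices times via
# unsqueeze/expand; .contiguous() materialises the copy:
def _demo_multiple_choice_expand():
    input_ids = torch.zeros(2, 7, dtype=torch.long)                 # (batch, seq_len)
    expanded = input_ids.unsqueeze(1).expand(-1, 4, -1).contiguous()
    assert expanded.shape == (2, 4, 7)                              # (batch, num_choices, seq_len)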
| 644 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback so the type hints below still resolve when vision isn't available."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Shorten an image to a stable 10-character md5 fingerprint."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    """Summarise a mask by its hash and shape so it can be compared compactly."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit (with no gates applied) on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
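# --- Hedged note (added for illustration; not part of the original script) ---------
# With no gates applied the qubit stays in |0>, so all shots should land on the
# '0' outcome, e.g. Counts({'0': 1000}).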
if __name__ == "__main__":
print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
| 154 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalise accepted inputs to a batch of videos, i.e. a list of lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    # Class name inferred from the offset/rescale behaviour of this video processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        # Fall back to the values configured on the processor for any argument left as None.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
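# --- Usage sketch (not part of the class above) ---
# A minimal driver for this preprocessing pipeline, assuming the class is wired up as a
# transformers video image processor such as VivitImageProcessor; shapes are illustrative.
#
#   import numpy as np
#   from transformers import VivitImageProcessor
#
#   processor = VivitImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   inputs = processor(video, return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # (batch, num_frames, num_channels, height, width)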
| 51 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
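# Note on the split above: for hidden size d, torch's fused attention stores in_proj_weight
# with shape (3d, d); rows [0:d], [d:2d] and [2d:3d] hold the query, key and value projections
# respectively, which is exactly how the slices are assigned back into the state dict.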
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowercase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
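# Example invocation of this conversion script (script filename and output path are hypothetical;
# the flags match the argparse definitions above):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large-converted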
| 342 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        # Compute, for every query token, the probability of being an entity start/end token.
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
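# --- Usage sketch (illustrative only) ---
# The forward pass above expects tokenized query and support batches plus three bookkeeping
# tensors on W_supports. The entity-marker tokens "[E]" / "[/E]" are an assumption here; the
# real fsner package prepares these inputs with its own utilities.
#
#   import torch
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
#   model = FSNERModel()
#   W_query = dict(tokenizer(["who founded acme corp ?"], return_tensors="pt"))
#   supports = ["[E] acme corp [/E] was founded long ago"]
#   W_supports = dict(tokenizer(supports, return_tensors="pt", padding=True))
#   W_supports["sizes"] = torch.tensor([len(supports)])
#   W_supports["start_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[E]"))
#   W_supports["end_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[/E]"))
#   p_starts, p_ends = model(W_query, W_supports)  # per-token start/end probabilities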
| 51 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new
    default level. Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    # Apply the default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level of the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity of the library's root logger to `INFO`."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity of the library's root logger to `WARNING`."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity of the library's root logger to `DEBUG`."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity of the library's root logger to `ERROR`."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute lookup."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
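# --- Usage sketch for the helpers above ---
#
#   set_verbosity_info()              # or set_verbosity(log_levels["debug"])
#   logger = get_logger(__name__)     # child of the library root logger
#   logger.info("dataset loaded")
#   disable_progress_bar()            # flips the module-level _tqdm_active flag
#   for _ in tqdm(range(10)):         # now a no-op EmptyTqdm wrapper
#       pass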
| 62 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
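# --- Standalone sketch of the same denoising loop outside the test harness ---
# `noise_pred` stands in for a real UNet call; shapes and step count are illustrative.
#
#   import torch
#   from diffusers import EulerDiscreteScheduler
#
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   generator = torch.manual_seed(0)
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = torch.zeros_like(model_input)  # replace with unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample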
| 51 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_decoder()
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(a__ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=a__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_decoder()
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
SCREAMING_SNAKE_CASE_ : str = floats_list((3, 1_0_0_0) )
SCREAMING_SNAKE_CASE_ : List[Any] = feature_extractor(a__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : List[Any] = processor(a__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = self.get_decoder()
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
SCREAMING_SNAKE_CASE_ : int = 'This is a test string'
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=a__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = self.get_decoder()
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
SCREAMING_SNAKE_CASE_ : str = processor.decode(a__ )
SCREAMING_SNAKE_CASE_ : str = decoder.decode_beams(a__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        #       otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_decoder()
SCREAMING_SNAKE_CASE_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
SCREAMING_SNAKE_CASE_ : str = self._get_dummy_logits()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1_5
SCREAMING_SNAKE_CASE_ : Dict = -20.0
SCREAMING_SNAKE_CASE_ : Any = -4.0
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(
a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , )
SCREAMING_SNAKE_CASE_ : List[str] = decoded_processor_out.text
SCREAMING_SNAKE_CASE_ : List[str] = list(a__ )
with get_context('fork' ).Pool() as pool:
SCREAMING_SNAKE_CASE_ : str = decoder.decode_beams_batch(
a__ , a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , )
SCREAMING_SNAKE_CASE_ : Any = [d[0][0] for d in decoded_decoder_out]
SCREAMING_SNAKE_CASE_ : List[Any] = [d[0][2] for d in decoded_decoder_out]
SCREAMING_SNAKE_CASE_ : List[str] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(a__ , a__ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , a__ )
self.assertTrue(np.array_equal(a__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , a__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(a__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , a__ , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_decoder()
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_dummy_logits()
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2.0
SCREAMING_SNAKE_CASE_ : List[Any] = 5.0
SCREAMING_SNAKE_CASE_ : Tuple = -20.0
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Any = processor.batch_decode(
a__ , alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoded_processor_out.text
SCREAMING_SNAKE_CASE_ : Any = list(a__ )
decoder.reset_params(
alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , )
with get_context('fork' ).Pool() as pool:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoder.decode_beams_batch(
a__ , a__ , )
SCREAMING_SNAKE_CASE_ : Any = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(a__ , a__ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , a__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
SCREAMING_SNAKE_CASE_ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
SCREAMING_SNAKE_CASE_ : str = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.listdir(a__ )
SCREAMING_SNAKE_CASE_ : Any = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(a__ , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = snapshot_download('hf-internal-testing/processor_with_lm' )
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaProcessorWithLM.from_pretrained(a__ )
SCREAMING_SNAKE_CASE_ : int = processor.decoder.model_container[processor.decoder._model_key]
SCREAMING_SNAKE_CASE_ : Any = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.listdir(a__ )
SCREAMING_SNAKE_CASE_ : Tuple = os.listdir(a__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(a__ , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
SCREAMING_SNAKE_CASE_ : int = floats_list((3, 1_0_0_0) )
SCREAMING_SNAKE_CASE_ : List[str] = processor_wavaveca(a__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : str = processor_auto(a__ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_dummy_logits()
SCREAMING_SNAKE_CASE_ : Dict = processor_wavaveca.batch_decode(a__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = processor_auto.batch_decode(a__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_feature_extractor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_decoder()
SCREAMING_SNAKE_CASE_ : str = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_dummy_logits()[0]
SCREAMING_SNAKE_CASE_ : int = processor.decode(a__ , output_word_offsets=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_dummy_logits()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.batch_decode(a__ , output_word_offsets=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(a__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase__ ( self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('common_voice' , 'en' , split='train' , streaming=a__ )
SCREAMING_SNAKE_CASE_ : Dict = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
SCREAMING_SNAKE_CASE_ : Dict = iter(a__ )
SCREAMING_SNAKE_CASE_ : Dict = next(a__ )
SCREAMING_SNAKE_CASE_ : Dict = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
SCREAMING_SNAKE_CASE_ : List[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(a__ ).logits.cpu().numpy()
SCREAMING_SNAKE_CASE_ : List[str] = processor.decode(logits[0] , output_word_offsets=a__ )
SCREAMING_SNAKE_CASE_ : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
SCREAMING_SNAKE_CASE_ : Dict = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
SCREAMING_SNAKE_CASE_ : Optional[int] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(a__ , 'word' ) ) , a__ )
self.assertEqual(' '.join(self.get_from_offsets(a__ , 'word' ) ) , output.text )
# output times
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(self.get_from_offsets(a__ , 'start_time' ) )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(self.get_from_offsets(a__ , 'end_time' ) )
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) )
self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) )
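# --- End-to-end decode sketch outside the test suite ---
# Checkpoint name is taken from the slow integration test above; the audio array here is a
# placeholder (one second of silence at 16 kHz), so the transcript will be empty or meaningless.
#
#   import numpy as np
#   import torch
#   from transformers import AutoProcessor, Wav2Vec2ForCTC
#
#   processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
#   model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
#   audio = np.zeros(16000, dtype=np.float32)
#   inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits.cpu().numpy()
#   print(processor.batch_decode(logits).text)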
| 101 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
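# Example invocation (the CSV is produced by the transformers benchmarking utilities;
# script filename and paths are hypothetical):
#
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory_plot.png
#   python plot_csv_file.py --csv_file inference_time.csv --is_time --no_log_scale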
| 51 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
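# For reference: with this toy vocab/merges, "readapt" segments as 're@@' + 'adapt' because
# "ad apt</w>" is a defined merge, while "react" has no applicable word-final merge and falls
# back to the subword pieces 're@@ a@@ c@@ t'. "<unk>" maps to id 6 per the vocab order in setUp().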
| 399 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Prepare a COCO validation image that we will use to verify the results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our MobileViTV2 structure."""
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for the base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
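# Example invocation (a hedged sketch, not part of the original script; the script
# name and all file paths below are hypothetical):
#
#   python convert_mobilevitv2.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf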
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
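# Example (illustrative only): resolving an image processor class from its string name.
#
#   >>> image_processor_class_from_name("CLIPImageProcessor")
#   <class 'transformers.models.clip.image_processing_clip.CLIPImageProcessor'>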
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image processor configuration of a pretrained model, if it exists."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    This is a generic image processor class that will be instantiated as one of the image processor classes of the
    library when created with the [`AutoImageProcessor.from_pretrained`] class method. This class cannot be
    instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
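# Typical usage (a short sketch, not part of the original module):
#
#   >>> from transformers import AutoImageProcessor
#   >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   >>> # inputs = image_processor(images=image, return_tensors="pt")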
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a MegatronBERT model."""

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
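# Example (sketch, assuming the standard transformers layout): instantiating a
# default configuration and building a model skeleton from it.
#
#   >>> from transformers import MegatronBertConfig, MegatronBertModel
#   >>> configuration = MegatronBertConfig()
#   >>> model = MegatronBertModel(configuration)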
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MAE model."""

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
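# Example (sketch): the defaults yield a configuration similar to the
# facebook/vit-mae-base architecture.
#
#   >>> from transformers import ViTMAEConfig, ViTMAEModel
#   >>> configuration = ViTMAEConfig()
#   >>> model = ViTMAEModel(configuration)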
from __future__ import annotations

graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Run a breadth-first search from the source vertex, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return a string describing the shortest path from the source to the target vertex."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
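# Expected behaviour (sketch): with source vertex "G", the demo above prints
# "G->C->A->B->D" and "G", then raises ValueError for the unknown vertex "Foo".
# BFS runs in O(V + E); shortest_path reconstructs the route by walking parents.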
def selection_sort(collection: list) -> list:
    """Pure implementation of the selection sort algorithm in Python."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
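# Quick check (doctest-style sketch):
#
#   >>> selection_sort([0, 5, 3, 2, 2])
#   [0, 2, 2, 3, 5]
#   >>> selection_sort([-2, -5, -45])
#   [-45, -5, -2]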
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class __lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
__a : Dict = vocab_size
__a : Optional[int] = d_embed
__a : Union[str, Any] = d_proj
__a : Optional[Any] = cutoffs + [vocab_size]
__a : Any = [0] + self.cutoffs
__a : Union[str, Any] = div_val
__a : Union[str, Any] = self.cutoffs[0]
__a : List[Any] = len(self.cutoffs ) - 1
__a : Dict = self.shortlist_size + self.n_clusters
__a : int = keep_order
__a : str = []
__a : Optional[int] = []
def _lowerCamelCase ( self , _UpperCAmelCase ):
if self.n_clusters > 0:
__a : Union[str, Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=_UpperCAmelCase , name='''cluster_weight''' )
__a : List[str] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=_UpperCAmelCase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__a : Dict = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_projs_._{i}""" , )
self.out_projs.append(_UpperCAmelCase )
else:
self.out_projs.append(_UpperCAmelCase )
__a : List[str] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._weight""" , )
__a : Any = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__a , __a : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a : Union[str, Any] = self.d_embed // (self.div_val**i)
__a : List[Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_projs_._{i}""" )
self.out_projs.append(_UpperCAmelCase )
__a : List[Any] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._weight""" , )
__a : List[Any] = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(_UpperCAmelCase )
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__a : Any = x
if proj is not None:
__a : Optional[int] = tf.einsum('''ibd,ed->ibe''' , _UpperCAmelCase , _UpperCAmelCase )
return tf.einsum('''ibd,nd->ibn''' , _UpperCAmelCase , _UpperCAmelCase ) + b
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = shape_list(_UpperCAmelCase )
__a : List[str] = tf.range(lp_size[0] , dtype=target.dtype )
__a : Optional[int] = tf.stack([r, target] , 1 )
return tf.gather_nd(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=False ):
__a : List[str] = 0
if self.n_clusters == 0:
__a : Union[str, Any] = self._logit(_UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__a : str = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
__a : Dict = tf.nn.log_softmax(_UpperCAmelCase , axis=-1 )
else:
__a : Any = shape_list(_UpperCAmelCase )
__a : Optional[Any] = []
__a : Any = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__a , __a : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__a : Dict = (target >= l_idx) & (target < r_idx)
__a : Any = tf.where(_UpperCAmelCase )
__a : Optional[int] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase ) - l_idx
if self.div_val == 1:
__a : Tuple = self.out_layers[0][0][l_idx:r_idx]
__a : Optional[Any] = self.out_layers[0][1][l_idx:r_idx]
else:
__a : Union[str, Any] = self.out_layers[i][0]
__a : Optional[int] = self.out_layers[i][1]
if i == 0:
__a : Optional[int] = tf.concat([cur_W, self.cluster_weight] , 0 )
__a : List[Any] = tf.concat([cur_b, self.cluster_bias] , 0 )
__a : Union[str, Any] = self._logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.out_projs[0] )
__a : List[str] = tf.nn.log_softmax(_UpperCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__a : List[Any] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = self._gather_logprob(_UpperCAmelCase , _UpperCAmelCase )
else:
__a : Any = self._logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.out_projs[i] )
__a : Optional[Any] = tf.nn.log_softmax(_UpperCAmelCase )
__a : List[str] = self.cutoffs[0] + i - 1 # No probability for the head cluster
__a : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_UpperCAmelCase )
if target is not None:
__a : Union[str, Any] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
__a : Optional[Any] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
__a : str = self._gather_logprob(_UpperCAmelCase , _UpperCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_UpperCAmelCase , -cur_logprob , shape_list(_UpperCAmelCase ) )
__a : str = tf.concat(_UpperCAmelCase , axis=-1 )
if target is not None:
if return_mean:
__a : Tuple = tf.reduce_mean(_UpperCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_UpperCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(_UpperCAmelCase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
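# Minimal usage sketch (the shapes and sizes below are hypothetical, not from the
# original file): the layer returns log-probabilities over the full vocabulary and
# registers the language-modeling loss on the layer via `add_loss`.
#
#   >>> layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#   >>> hidden = tf.random.normal((8, 4, 32))  # (seq_len, batch, d_proj)
#   >>> target = tf.random.uniform((8, 4), maxval=1000, dtype=tf.int32)
#   >>> logprobs = layer(hidden, target)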
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename pointing into a cache snapshot."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
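# Example (illustrative; the path and hash are hypothetical): a resolved cache path
# such as ".../snapshots/6f0063b4e3f1a3b6d0c3ea4c3d12f0f9c5a9b1ce/unet/config.json"
# yields the 40-character hex hash between "snapshots/" and the next "/".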
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
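# Example (illustrative): the variant is inserted just before the file extension.
#
#   >>> _add_variant("diffusion_pytorch_model.bin", "fp16")
#   'diffusion_pytorch_model.fp16.bin'
#   >>> _add_variant("diffusion_pytorch_model.bin", None)
#   'diffusion_pytorch_model.bin'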
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
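# Usage sketch (assumes the transformers Tools API; the image file is hypothetical):
#
#   >>> from PIL import Image
#   >>> captioner = ImageCaptioningTool()
#   >>> image = Image.open("photo.jpg")
#   >>> caption = captioner(image)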
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''align_vision_model'''
    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.2_5 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.0_2 , batch_norm_eps = 0.0_0_1 , batch_norm_momentum = 0.9_9 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''align'''
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config: AlignTextConfig , vision_config: AlignVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
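# Hedged usage sketch (assumes the three classes above are importable under these
# names): composing a full config from the two sub-configs.
#
#     text_config = AlignTextConfig(vocab_size=30522)
#     vision_config = AlignVisionConfig(image_size=600)
#     config = AlignConfig.from_text_vision_configs(text_config , vision_config)
#     assert config.projection_dim == 640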
| 52 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version('''<''' , '''2.0.0''') or not hasattr(torch , '''_dynamo'''):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , '''forward''')
        original_forward = model.__dict__.pop('''_original_forward''' , None)
        if original_forward is not None:
            while hasattr(forward , '''__wrapped__'''):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , '''_converted_to_transformer_engine''' , False):
        convert_model(model , to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f)
    elif PartialState().local_process_index == 0:
        torch.save(obj , f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj , '''__qualname__''') and not hasattr(obj , '''__name__'''):
        obj = getattr(obj , '''__class__''' , obj)
    if hasattr(obj , '''__qualname__'''):
        return obj.__qualname__
    if hasattr(obj , '''__name__'''):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value , dict):
            node = destination.setdefault(key , {})
            merge_dicts(value , node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM) as s:
        return s.connect_ex(('''localhost''', port)) == 0
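# Hedged demo of patch_environment above (assumes OMP_NUM_THREADS is not already
# set): the variable exists only inside the context block.
#
#     with patch_environment(omp_num_threads=1):
#         assert os.environ["OMP_NUM_THREADS"] == "1"
#     assert "OMP_NUM_THREADS" not in os.environ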
| 52 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big , k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k)
if __name__ == "__main__":
import doctest
doctest.testmod()
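    # Hedged demo of the quickselect above (assumes the list holds distinct values):
    print(kth_number([2, 1, 3, 4, 5] , 3))  # -> 3, the 3rd smallest element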
| 52 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    '''simple docstring'''

    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['''image''', '''text''']
    outputs = ['''text''']
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ):
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(R'''<.*?>''' , '''''' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
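# Hedged usage sketch: calling the tool instance runs encode -> forward -> decode
# above in sequence (the document path is illustrative).
#
#     from PIL import Image
#     tool = DocumentQuestionAnsweringTool()
#     answer = tool(document=Image.open('''invoice.png''') , question='''What is the total?''')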
| 52 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out , axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    with open(dataset_path , encoding='''utf_8''') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.int64)
        mc_labels = np.zeros((n_batch,) , dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , default='''openai-gpt''' , help='''pretrained model name''')
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
    parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument('''--train_dataset''' , type=str , default='''''')
    parser.add_argument('''--eval_dataset''' , type=str , default='''''')
    parser.add_argument('''--seed''' , type=int , default=42)
    parser.add_argument('''--num_train_epochs''' , type=int , default=3)
    parser.add_argument('''--train_batch_size''' , type=int , default=8)
    parser.add_argument('''--eval_batch_size''' , type=int , default=16)
    parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=float , help='''Epsilon for Adam optimizer.''')
    parser.add_argument('''--max_grad_norm''' , type=int , default=1)
    parser.add_argument(
        '''--max_steps''' , default=-1 , type=int , help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--learning_rate''' , type=float , default=6.25e-5)
    parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''')
    parser.add_argument('''--lr_schedule''' , type=str , default='''warmup_linear''')
    parser.add_argument('''--weight_decay''' , type=float , default=0.0_1)
    parser.add_argument('''--lm_coef''' , type=float , default=0.9)
    parser.add_argument('''--n_valid''' , type=int , default=374)
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''')
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''')
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True)
        ptvsd.wait_for_attach()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device , n_gpu))
    if not args.do_train and not args.do_eval:
        raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['''_start_''', '''_delimiter_''', '''_classify_''']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj , str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj , int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
    logger.info('''Encoding dataset...''')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]) , len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length , model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids)
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='''Training''')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = '''Training loss: {:.2e} lr: {:.2e}'''.format(exp_average_loss , scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , '''module''') else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME)
        torch.save(model_to_save.state_dict() , output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='''Evaluating'''):
            batch = tuple(t.to(device) for t in batch)
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('''cpu''').numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
        output_eval_file = os.path.join(args.output_dir , '''eval_results.txt''')
        with open(output_eval_file , '''w''') as writer:
            logger.info('''***** Eval results *****''')
            for key in sorted(result.keys()):
                logger.info(''' %s = %s''' , key , str(result[key]))
                writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
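# Example invocation (hypothetical dataset paths; the flags match the parser in main()):
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset cloze_test_val__spring2016.csv \
#       --eval_dataset cloze_test_test__spring2016.csv \
#       --output_dir ./gpt-rocstories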
| 52 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=[1, 16, 4, 4] , _UpperCAmelCase=None , ):
__a : List[Any] = parent
__a : List[Any] = batch_size
__a : int = image_size
__a : List[Any] = patch_size
__a : Optional[Any] = num_channels
__a : int = is_training
__a : str = use_labels
__a : int = hidden_size
__a : Any = num_hidden_layers
__a : Dict = num_attention_heads
__a : Any = intermediate_size
__a : Dict = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : Optional[Any] = attention_probs_dropout_prob
__a : str = type_sequence_label_size
__a : Any = initializer_range
__a : List[str] = scope
__a : int = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__a : Any = (self.image_size // 32) ** 2
__a : int = num_patches + 1
def _lowerCamelCase ( self ):
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_labels:
__a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
__a : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCAmelCase , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : int = ViTHybridModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = self.type_sequence_label_size
__a : List[str] = ViTHybridForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self ):
__a : List[Any] = self.prepare_config_and_inputs()
__a , __a , __a : Optional[int] = config_and_inputs
__a : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__lowerCAmelCase = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : List[Any] = ViTHybridModelTester(self )
__a : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(_UpperCAmelCase )
__a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[int] = [*signature.parameters.keys()]
__a : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
__a : Tuple = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__a : Union[str, Any] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _lowerCamelCase ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : int = ViTHybridModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __A ( ) -> Tuple:
__a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
__a : Optional[int] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
__a : List[Any] = self.default_image_processor
__a : Dict = prepare_img()
__a : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__a : List[str] = model(**_UpperCAmelCase )
# verify the logits
__a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : List[str] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def _lowerCamelCase ( self ):
__a : List[Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
__a : List[str] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
__a : Optional[int] = prepare_img()
__a : Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Dict = model(**_UpperCAmelCase )
__a : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
__a : Dict = logits.argmax(-1 ).item()
        self.assertTrue(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
import math
class SelfOrganizingMap :
    '''simple docstring'''

    def get_winner( self , weights , sample ):
        # Compute the winning vector by Euclidean distance to the two weight vectors
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights , sample , j , alpha ):
        # Move the winning weight vector toward the sample by a factor alpha
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample)
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""")
    print(F"""Weights that have been trained : {weights}""")
# running the main() function
if __name__ == "__main__":
main()
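# Note on the update rule above: the winning weight vector moves toward the sample
# by a fraction alpha, w_j[i] += alpha * (x[i] - w_j[i]). For instance, with
# alpha = 0.5 a weight of 0.2 pulled toward an input of 1 becomes 0.2 + 0.5 * 0.8 = 0.6.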
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''levit'''
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
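# Hedged usage sketch: with the defaults above, LevitConfig() describes the
# facebook/levit-128S geometry listed in the checkpoint map.
#
#     config = LevitConfig()
#     assert config.hidden_sizes == [128, 256, 384]
#     assert len(config.down_ops) == 2  # two "Subsample" stages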
| 52 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
))
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
))
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
))
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
))
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias"""))
return attention_weights
def cls_token(idx):
    token = []
    token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token'''))
    return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight'''))
head.append(('''layernorm.bias''', '''norm.bias'''))
head.append(('''classifier.weight''', '''head.weight'''))
head.append(('''classifier.bias''', '''head.bias'''))
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    repo_id = '''huggingface/label-files'''
    num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''')) , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu'''))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
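# Example invocation (script and checkpoint file names are illustrative):
#   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13-384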
| 52 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a , set) and isinstance(set_b , set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a , (list, tuple)) and isinstance(set_b , (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
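    # The sets above share {c, d, e} (3 elements) and their union has 8, so this prints 0.375.
    # The list branch of jaccard_similarity computes the same ratio:
    print(jaccard_similarity(sorted(set_a), sorted(set_b)))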
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class Graph :
    '''simple docstring'''

    def __init__( self , vertices , edges ):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight ):
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ):
        subgraph: Graph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir , filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int
    with open(network_file) as f:
        data = f.read().strip().split('''\n''')
    adjacency_matrix = [line.split(''',''') for line in data]
    for edge1 in range(1 , len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjacency_matrix))) , edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
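# Hedged sanity check (toy data, not the Project Euler input): for a triangle with
# edges {(0, 1): 1, (1, 2): 2, (0, 2): 3}, the minimum spanning tree keeps weights
# 1 + 2, so the achievable saving is 6 - 3 = 3.
#
#     g = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     assert sum(g.edges.values()) - sum(g.prims_algorithm().edges.values()) == 3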
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding),
        )
    return max(max_including , max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
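    # Hedged demo of the DP above: from [1, 2, 4, 7] the best non-adjacent picks are
    # 2 + 7 = 9, so the function returns 9.
    print(maximum_non_adjacent_sum([1, 2, 4, 7]))  # -> 9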
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }
    def __init__( self , vocab_size=50265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.0_2 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
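# Hedged usage sketch: the defaults above describe the base decoder geometry, so a
# bare TrOCRConfig() should already yield a 12-layer, 16-head decoder with d_model=1024.
#
#     config = TrOCRConfig()
#     assert (config.decoder_layers, config.decoder_attention_heads, config.d_model) == (12, 16, 1024)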
| 52 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias'''))
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"""))
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def __A ( a_ :Dict , a_ :List[Any] , a_ :Union[str, Any]) -> int:
__a : List[Any] = dct.pop(a_)
__a : List[Any] = val
def __A ( a_ :List[str] , a_ :Union[str, Any] , a_ :List[str]) -> List[str]:
__a : List[Any] = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
__a : List[str] = model_name_to_url[model_name]
__a : Tuple = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''')['''state_dict''']
__a : Union[str, Any] = get_upernet_config(a_)
__a : int = UperNetForSemanticSegmentation(a_)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__a : Optional[Any] = state_dict.pop(a_)
if "bn" in key:
__a : Union[str, Any] = key.replace('''bn''' , '''batch_norm''')
__a : str = val
# rename keys
__a : int = create_rename_keys(a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
model.load_state_dict(a_)
# verify on image
__a : List[Any] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__a : List[str] = Image.open(requests.get(a_ , stream=a_).raw).convert('''RGB''')
__a : Optional[Any] = SegformerImageProcessor()
__a : List[str] = processor(a_ , return_tensors='''pt''').pixel_values
with torch.no_grad():
__a : List[Any] = model(a_)
if model_name == "upernet-convnext-tiny":
__a : List[str] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]])
elif model_name == "upernet-convnext-small":
__a : Optional[int] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]])
elif model_name == "upernet-convnext-base":
__a : Union[str, Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]])
elif model_name == "upernet-convnext-large":
__a : Optional[Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]])
elif model_name == "upernet-convnext-xlarge":
__a : int = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]])
print('''Logits:''' , outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1e-4)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(a_)
print(F"""Saving processor to {pytorch_dump_folder_path}""")
processor.save_pretrained(a_)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""")
model.push_to_hub(F"""openmmlab/{model_name}""")
processor.push_to_hub(F"""openmmlab/{model_name}""")
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''') == 1
__a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1
__a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple
__a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__a : Any = logits[0, masked_index, :]
__a : Any = logits.softmax(dim=0)
__a , __a : Optional[Any] = prob.topk(k=a_ , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(a_) , a_),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(a_ , a_),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
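# The call above is expected to return up to `topk` tuples of the form
# (filled_sentence, probability, predicted_token). As an unverified illustration, a
# plausible top completion is ("Le camembert est délicieux :)", ~0.49, "délicieux");
# exact tokens and scores depend on the checkpoint version.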
| 52 | 1 |
"""simple docstring"""
def __A ( a_ :Optional[Any]) -> Optional[int]:
__a : Any = []
__a : Union[str, Any] = []
__a : Dict = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__a : Optional[Any] = len(a_) if (len(a_) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8) , '''Stack'''.center(a_) , '''Postfix'''.center(a_) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7))
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(a_) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(a_) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop()) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(a_) == 0:
stack.append(a_) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(a_) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:  # never pop past a "("
post_fix.append(stack.pop()) # pop stack & add to Postfix
stack.append(a_) # push x to stack
print(
x.center(8) , (''''''.join(a_)).ljust(a_) , (''''''.join(a_)).ljust(a_) , sep=''' | ''' , ) # Output in tabular format
while len(a_) > 0: # while stack is not empty
post_fix.append(stack.pop()) # pop stack & add to Postfix
print(
''' '''.center(8) , (''''''.join(a_)).ljust(a_) , (''''''.join(a_)).ljust(a_) , sep=''' | ''' , ) # Output in tabular format
return "".join(a_) # return Postfix as str
def __A ( a_ :Optional[int]) -> List[str]:
__a : Any = list(infix[::-1]) # reverse the infix equation
for i in range(len(a_)):
if infix[i] == "(":
__a : Optional[int] = ''')''' # change "(" to ")"
elif infix[i] == ")":
__a : int = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(a_)))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
A = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
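# Sanity check (assumed behaviour of the helpers above, single-character operands):
# "a+b*c" should convert to postfix "abc*+", and "(a+b)*c" to prefix "*+abc".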
| 52 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
self.assertEqual(kp.calc_profit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 210 )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(
_UpperCAmelCase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 52 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __get__( self , _UpperCAmelCase , _UpperCAmelCase=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
__a : Union[str, Any] = '''__cached_''' + self.fget.__name__
__a : Union[str, Any] = getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if cached is None:
__a : str = self.fget(_UpperCAmelCase )
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return cached
def __A ( a_ :Dict) -> Tuple:
__a : List[Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""")
def __A ( a_ :Any) -> Tuple:
if is_torch_fx_proxy(a_):
return True
if is_torch_available():
import torch
if isinstance(a_ , torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(a_ , tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(a_ , (jnp.ndarray, Tracer)):
return True
return isinstance(a_ , np.ndarray)
def __A ( a_ :Optional[Any]) -> Any:
return isinstance(a_ , np.ndarray)
def __A ( a_ :List[str]) -> Dict:
return _is_numpy(a_)
def __A ( a_ :Tuple) -> str:
import torch
return isinstance(a_ , torch.Tensor)
def __A ( a_ :Tuple) -> Optional[Any]:
return False if not is_torch_available() else _is_torch(a_)
def __A ( a_ :int) -> List[str]:
import torch
return isinstance(a_ , torch.device)
def __A ( a_ :List[str]) -> Tuple:
return False if not is_torch_available() else _is_torch_device(a_)
def __A ( a_ :List[Any]) -> List[str]:
import torch
if isinstance(a_ , a_):
if hasattr(a_ , a_):
__a : Dict = getattr(a_ , a_)
else:
return False
return isinstance(a_ , torch.dtype)
def __A ( a_ :List[str]) -> Any:
return False if not is_torch_available() else _is_torch_dtype(a_)
def __A ( a_ :Optional[Any]) -> List[str]:
import tensorflow as tf
return isinstance(a_ , tf.Tensor)
def __A ( a_ :int) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(a_)
def __A ( a_ :int) -> int:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(a_ , '''is_symbolic_tensor'''):
return tf.is_symbolic_tensor(a_)
return type(a_) == tf.Tensor
def __A ( a_ :Any) -> int:
return False if not is_tf_available() else _is_tf_symbolic_tensor(a_)
def __A ( a_ :List[Any]) -> Union[str, Any]:
import jax.numpy as jnp # noqa: F811
return isinstance(a_ , jnp.ndarray)
def __A ( a_ :Tuple) -> int:
return False if not is_flax_available() else _is_jax(a_)
def __A ( a_ :List[Any]) -> Optional[int]:
if isinstance(a_ , (dict, UserDict)):
return {k: to_py_obj(a_) for k, v in obj.items()}
elif isinstance(a_ , (list, tuple)):
return [to_py_obj(a_) for o in obj]
elif is_tf_tensor(a_):
return obj.numpy().tolist()
elif is_torch_tensor(a_):
return obj.detach().cpu().tolist()
elif is_jax_tensor(a_):
return np.asarray(a_).tolist()
elif isinstance(a_ , (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
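# Sketch of the conversion helper above (hypothetical input): a dict such as
# {"ids": np.array([[1, 2]])} comes back as {"ids": [[1, 2]]}, with tensors of any
# supported framework recursively turned into plain Python lists.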
def __A ( a_ :Tuple) -> str:
if isinstance(a_ , (dict, UserDict)):
return {k: to_numpy(a_) for k, v in obj.items()}
elif isinstance(a_ , (list, tuple)):
return np.array(a_)
elif is_tf_tensor(a_):
return obj.numpy()
elif is_torch_tensor(a_):
return obj.detach().cpu().numpy()
elif is_jax_tensor(a_):
return np.asarray(a_)
else:
return obj
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(_UpperCAmelCase ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
__a : Union[str, Any] = getattr(self , class_fields[0].name )
__a : int = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : Tuple = first_field.items()
__a : Any = True
else:
try:
__a : Tuple = iter(_UpperCAmelCase )
__a : str = True
except TypeError:
__a : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_UpperCAmelCase ):
if (
not isinstance(_UpperCAmelCase , (list, tuple) )
or not len(_UpperCAmelCase ) == 2
or not isinstance(element[0] , _UpperCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__a : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__a : Tuple = element[1]
elif first_field is not None:
__a : List[str] = first_field
else:
for field in class_fields:
__a : str = getattr(self , field.name )
if v is not None:
__a : Tuple = v
def __delitem__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , _UpperCAmelCase , _UpperCAmelCase ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_UpperCAmelCase , _UpperCAmelCase )
super().__setattr__(_UpperCAmelCase , _UpperCAmelCase )
def __setitem__( self , _UpperCAmelCase , _UpperCAmelCase ):
# Will raise a KeyError if needed
super().__setitem__(_UpperCAmelCase , _UpperCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
return tuple(self[k] for k in self.keys() )
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase ):
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''longest'''
__lowerCAmelCase = '''max_length'''
__lowerCAmelCase = '''do_not_pad'''
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''pt'''
__lowerCAmelCase = '''tf'''
__lowerCAmelCase = '''np'''
__lowerCAmelCase = '''jax'''
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
__a : Union[str, Any] = context_managers
__a : Optional[int] = ExitStack()
def __enter__( self ):
for context_manager in self.context_managers:
self.stack.enter_context(_UpperCAmelCase )
def __exit__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
self.stack.__exit__(*_UpperCAmelCase , **_UpperCAmelCase )
def __A ( a_ :Any) -> str:
__a : Tuple = infer_framework(a_)
if framework == "tf":
__a : Dict = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
__a : List[Any] = inspect.signature(model_class.forward) # PyTorch models
else:
__a : Optional[Any] = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __A ( a_ :List[Any]) -> str:
__a : Optional[int] = model_class.__name__
__a : List[str] = infer_framework(a_)
if framework == "tf":
__a : Union[str, Any] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
__a : Optional[Any] = inspect.signature(model_class.forward) # PyTorch models
else:
__a : Any = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __A ( a_ :MutableMapping , a_ :str = "" , a_ :str = ".") -> Any:
def _flatten_dict(a_ :Tuple , a_ :Optional[Any]="" , a_ :Any="."):
for k, v in d.items():
__a : Optional[int] = str(a_) + delimiter + str(a_) if parent_key else k
if v and isinstance(a_ , a_):
yield from flatten_dict(a_ , a_ , delimiter=a_).items()
else:
yield key, v
return dict(_flatten_dict(a_ , a_ , a_))
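# Sketch of the flattening helper above (hypothetical input): {"a": 1, "b": {"c": 2}}
# with the default "." delimiter becomes {"a": 1, "b.c": 2}.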
@contextmanager
def __A ( a_ :Optional[int] , a_ :bool = False) -> List[str]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __A ( a_ :Tuple , a_ :Any=None) -> str:
if is_numpy_array(a_):
return np.transpose(a_ , axes=a_)
elif is_torch_tensor(a_):
return array.T if axes is None else array.permute(*a_)
elif is_tf_tensor(a_):
import tensorflow as tf
return tf.transpose(a_ , perm=a_)
elif is_jax_tensor(a_):
return jnp.transpose(a_ , axes=a_)
else:
raise ValueError(F"""Type not supported for transpose: {type(a_)}.""")
def __A ( a_ :List[str] , a_ :Dict) -> str:
if is_numpy_array(a_):
return np.reshape(a_ , a_)
elif is_torch_tensor(a_):
return array.reshape(*a_)
elif is_tf_tensor(a_):
import tensorflow as tf
return tf.reshape(a_ , a_)
elif is_jax_tensor(a_):
return jnp.reshape(a_ , a_)
else:
raise ValueError(F"""Type not supported for reshape: {type(a_)}.""")
def __A ( a_ :Any , a_ :str=None) -> Any:
if is_numpy_array(a_):
return np.squeeze(a_ , axis=a_)
elif is_torch_tensor(a_):
return array.squeeze() if axis is None else array.squeeze(dim=a_)
elif is_tf_tensor(a_):
import tensorflow as tf
return tf.squeeze(a_ , axis=a_)
elif is_jax_tensor(a_):
return jnp.squeeze(a_ , axis=a_)
else:
raise ValueError(F"""Type not supported for squeeze: {type(a_)}.""")
def __A ( a_ :str , a_ :List[str]) -> Tuple:
if is_numpy_array(a_):
return np.expand_dims(a_ , a_)
elif is_torch_tensor(a_):
return array.unsqueeze(dim=a_)
elif is_tf_tensor(a_):
import tensorflow as tf
return tf.expand_dims(a_ , axis=a_)
elif is_jax_tensor(a_):
return jnp.expand_dims(a_ , axis=a_)
else:
raise ValueError(F"""Type not supported for expand_dims: {type(a_)}.""")
def __A ( a_ :Dict) -> Dict:
if is_numpy_array(a_):
return np.size(a_)
elif is_torch_tensor(a_):
return array.numel()
elif is_tf_tensor(a_):
import tensorflow as tf
return tf.size(a_)
elif is_jax_tensor(a_):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(a_)}.""")
def __A ( a_ :Tuple , a_ :Tuple) -> Optional[Any]:
for key, value in auto_map.items():
if isinstance(a_ , (tuple, list)):
__a : Dict = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
__a : Optional[int] = F"""{repo_id}--{value}"""
return auto_map
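# Illustrative mapping (hypothetical repo id): {"AutoModel": "modeling.MyModel"} with
# repo_id "user/repo" becomes {"AutoModel": "user/repo--modeling.MyModel"}; values that
# already contain "--" are left untouched.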
def __A ( a_ :Tuple) -> int:
for base_class in inspect.getmro(a_):
__a : Any = base_class.__module__
__a : Dict = base_class.__name__
if module.startswith('''tensorflow''') or module.startswith('''keras''') or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''') or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''') or module.startswith('''jax''') or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""")
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 52 | 1 |
"""simple docstring"""
from collections import deque
def __A ( a_ :Dict) -> int:
__a : int = len(a_)
__a : Any = deque()
__a : Union[str, Any] = [False for _ in range(a_)]
__a : Any = [-1 for _ in range(a_)]
__a : Optional[Any] = index_of[:]
def strong_connect(a_ :str , a_ :int , a_ :Union[str, Any]):
__a : List[str] = index # the number when this node is seen
__a : int = index # lowest rank node reachable from here
index += 1
stack.append(a_)
__a : str = True
for w in g[v]:
if index_of[w] == -1:
__a : Any = strong_connect(a_ , a_ , a_)
__a : Optional[int] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__a : Tuple = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__a : Dict = []
__a : Dict = stack.pop()
__a : int = False
component.append(a_)
while w != v:
__a : int = stack.pop()
__a : List[str] = False
component.append(a_)
components.append(a_)
return index
__a : List[str] = []
for v in range(a_):
if index_of[v] == -1:
strong_connect(a_ , 0 , a_)
return components
def __A ( a_ :int , a_ :str) -> Dict:
__a : Dict = [[] for _ in range(a_)]
for u, v in edges:
g[u].append(a_)
return g
if __name__ == "__main__":
# Test
A = 7
A = [0, 0, 1, 2, 3, 3, 4, 4, 6]
A = [1, 3, 2, 0, 1, 4, 5, 6, 5]
A = [(u, v) for u, v in zip(source, target)]
A = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :str) -> list[int]:
return [ord(elem) - 96 for elem in plain]
def __A ( a_ :list[int]) -> str:
return "".join(chr(elem + 96) for elem in encoded)
def __A ( ) -> None:
__a : Dict = encode(input('''-> ''').strip().lower())
print('''Encoded: ''' , a_)
print('''Decoded:''' , decode(a_))
if __name__ == "__main__":
main()
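# Round-trip sketch for lowercase a-z input (assumed behaviour of the helpers above):
#
# >>> encode("abc")
# [1, 2, 3]
# >>> decode([1, 2, 3])
# 'abc'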
| 52 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list[int]) -> int:
if not nums:
return 0
max_including : int = nums[0]
max_excluding : int = 0
for num in nums[1:]:
max_including , max_excluding = (
max_excluding + num,
max(max_including , max_excluding),
)
return max(max_including , max_excluding)
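# Worked example of the recurrence above: for [3, 7, 4, 6, 5] the best non-adjacent
# selection is 7 + 6 = 13 (taking 3 + 4 + 5 = 12 would be worse).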
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
A = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
A = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def __A ( a_ :Any , a_ :Union[str, Any]=False) -> Any:
__a , __a : str = create_model(
'''HTSAT-tiny''' , '''roberta''' , a_ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=a_ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def __A ( a_ :Union[str, Any]) -> List[str]:
__a : str = {}
__a : str = R'''.*sequential.(\d+).*'''
__a : Optional[int] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__a : List[Any] = key.replace(a_ , a_)
if re.match(a_ , a_):
# replace sequential layers with list
__a : str = re.match(a_ , a_).group(1)
__a : Union[str, Any] = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(a_)//3}.linear.""")
elif re.match(a_ , a_):
__a : Tuple = int(re.match(a_ , a_).group(1))
# Because in CLAP they use `nn.Sequential`...
__a : Tuple = 1 if projecton_layer == 0 else 2
__a : Optional[Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""")
if "audio" and "qkv" in key:
# split qkv into query key and value
__a : Optional[Any] = value
__a : Tuple = mixed_qkv.size(0) // 3
__a : List[str] = mixed_qkv[:qkv_dim]
__a : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
__a : Tuple = mixed_qkv[qkv_dim * 2 :]
__a : Union[str, Any] = query_layer
__a : int = key_layer
__a : Tuple = value_layer
else:
__a : str = value
return model_state_dict
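# Illustration of the renaming above: an original checkpoint key such as
# "audio_branch.<...>.mlp.fc1.weight" is remapped toward
# "audio_model.audio_encoder.<...>.intermediate.dense.weight".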
def __A ( a_ :str , a_ :Optional[int] , a_ :Optional[int] , a_ :Optional[Any]=False) -> List[Any]:
__a , __a : List[str] = init_clap(a_ , enable_fusion=a_)
clap_model.eval()
__a : Optional[Any] = clap_model.state_dict()
__a : List[str] = rename_state_dict(a_)
__a : Optional[int] = ClapConfig()
__a : int = enable_fusion
__a : Optional[int] = ClapModel(a_)
# ignore the spectrogram embedding layer
model.load_state_dict(a_ , strict=a_)
model.save_pretrained(a_)
transformers_config.save_pretrained(a_)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
A = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 52 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = '''▁'''
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 1 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
| 52 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def __A ( ) -> Dict:
__a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=a_ , default=a_ , required=a_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=a_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=a_ , default=4 , help='''How many images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=a_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=a_ , default=0 , help='''cuda_id.''' , )
__a : Any = parser.parse_args()
return args
def __A ( a_ :List[str] , a_ :Dict , a_ :int) -> List[Any]:
if not len(a_) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''')
__a , __a : Union[str, Any] = imgs[0].size
__a : List[str] = Image.new('''RGB''' , size=(cols * w, rows * h))
__a , __a : Optional[int] = grid.size
for i, img in enumerate(a_):
grid.paste(a_ , box=(i % cols * w, i // cols * h))
return grid
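# e.g. image_grid(images, rows=2, cols=2) pastes four equally sized PIL images into a
# single 2x2 contact sheet; a mismatched image count raises ValueError.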
def __A ( a_ :int , a_ :Dict="robotic cat with wings" , a_ :Any=7.5 , a_ :Optional[Any]=50 , a_ :Optional[int]=1 , a_ :int=42 , ) -> List[str]:
__a : Any = torch.Generator(pipeline.device).manual_seed(a_)
__a : Optional[Any] = pipeline(
a_ , guidance_scale=a_ , num_inference_steps=a_ , generator=a_ , num_images_per_prompt=a_ , ).images
__a : Dict = int(math.sqrt(a_))
__a : Dict = image_grid(a_ , rows=_rows , cols=num_images_per_prompt // _rows)
return grid, images
A = parse_args()
# Load models and create wrapper for stable diffusion
A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
A = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
A = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)  # disable the safety checker so generated images are never filtered
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
A = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
A = unet.to(torch.device('''cuda''', args.cuda_id))
A = pipeline.to(unet.device)
A , A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
A = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
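# Flow of the script above: when an INT8 checkpoint (best_model.pt) produced by
# neural_compressor exists next to the weights, `load` restores the quantized
# UNet and swaps it into the pipeline; otherwise the FP32 pipeline is simply
# moved to the requested CUDA device before generation.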
| 52 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
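# For "fixed_small" these reference values follow the DDPM posterior variance
# beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t): it vanishes at t=0 and
# approaches beta_t (beta_end=0.02 here) near t=999. A sketch of the formula,
# assuming the linear beta schedule configured above:
#
#   betas = torch.linspace(1e-4, 0.02, 1000)
#   alphas_bar = torch.cumprod(1.0 - betas, dim=0)
#   var_999 = betas[999] * (1 - alphas_bar[998]) / (1 - alphas_bar[999])  # ~0.02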
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
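# previous_timestep() walks the custom schedule: for timesteps [100, 87, 50, 1, 0]
# each entry should map to the next one in the list, and the final step maps to
# -1, which is exactly what the loop above asserts.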
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def __A ( a_ :List[Any]) -> List[str]:
__a : Dict = '''huggingface/label-files'''
__a : Dict = '''imagenet-1k-id2label.json'''
__a : List[str] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
__a : int = {int(a_): v for k, v in idalabel.items()}
__a : Optional[int] = {v: k for k, v in idalabel.items()}
__a : Any = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
__a : int = BitConfig(
conv_layer=a_ , num_labels=10_00 , idalabel=a_ , labelaid=a_ , )
return config
def __A ( a_ :Optional[int]) -> Optional[int]:
if "stem.conv" in name:
__a : Any = name.replace('''stem.conv''' , '''bit.embedder.convolution''')
if "blocks" in name:
__a : List[str] = name.replace('''blocks''' , '''layers''')
if "head.fc" in name:
__a : List[str] = name.replace('''head.fc''' , '''classifier.1''')
if name.startswith('''norm'''):
__a : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
__a : str = '''bit.encoder.''' + name
return name
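# Illustrative renames produced by the function above (example keys are
# hypothetical timm ResNetV2/BiT names):
#   "stem.conv.weight"               -> "bit.embedder.convolution.weight"
#   "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight"
#   "head.fc.weight"                 -> "classifier.1.weight"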
def __A ( ) -> Optional[Any]:
__a : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a : Any = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def __A ( a_ :Optional[int] , a_ :Union[str, Any] , a_ :Union[str, Any]=False) -> Union[str, Any]:
__a : List[str] = get_config(a_)
# load original model from timm
__a : int = create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model
__a : List[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
__a : Tuple = state_dict.pop(a_)
__a : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
__a : List[str] = BitForImageClassification(a_)
model.eval()
model.load_state_dict(a_)
# create image processor
__a : Tuple = create_transform(**resolve_data_config({} , model=a_))
__a : Optional[Any] = transform.transforms
__a : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
__a : List[str] = BitImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__a : List[str] = prepare_img()
__a : Optional[Any] = transform(a_).unsqueeze(0)
__a : Union[str, Any] = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
__a : Tuple = model(a_)
__a : Any = outputs.logits
print('''Logits:''' , logits[0, :3])
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1).item()])
__a : Union[str, Any] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""")
model.save_pretrained(a_)
processor.save_pretrained(a_)
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""")
model.push_to_hub(F"""ybelkada/{model_name}""")
processor.push_to_hub(F"""ybelkada/{model_name}""")
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
A = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 52 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A = random.Random()
def __A ( a_ :Tuple , a_ :Dict=1.0 , a_ :str=None , a_ :List[Any]=None) -> Dict:
if rng is None:
__a : Any = global_rng
__a : Tuple = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
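# floats_list returns a nested Python list of shape (shape[0], shape[1]) with
# values drawn from the module-level RNG, e.g. floats_list((2, 3)) yields two
# rows of three floats in [0, scale).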
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
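# The integration check above encodes one LibriSpeech sample into a single
# spectrogram of shape (1, 1, 192, 128) -- what appears to be (batch, audio
# channel, time frames, mel bins) -- and compares a 2x2 corner against
# reference values.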
| 52 | 1 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
A = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ :Optional[Any]) -> Optional[Any]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ :Optional[int] , a_ :Optional[int] , a_ :Optional[Any]) -> int:
return max(metric_fn(a_ , a_) for gt in ground_truths)
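# A hedged usage sketch (utils_rag is not shown here, so the normalization
# behaviour of exact_match_score is an assumption):
#
#   metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Paris, France"])
#
# keeps the best-scoring gold answer, returning 1.0 if the metric normalizes
# case and 0.0 otherwise.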
def __A ( a_ :List[str] , a_ :List[str] , a_ :Optional[Any]) -> Dict:
__a : List[Any] = [line.strip() for line in open(a_ , '''r''').readlines()]
__a : Dict = []
if args.gold_data_mode == "qa":
__a : Dict = pd.read_csv(a_ , sep='''\t''' , header=a_)
for answer_list in data[1]:
__a : List[str] = ast.literal_eval(a_)
answers.append(a_)
else:
__a : Union[str, Any] = [line.strip() for line in open(a_ , '''r''').readlines()]
__a : Union[str, Any] = [[reference] for reference in references]
__a : Dict = 0
for prediction, ground_truths in zip(a_ , a_):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_)
fa += metric_max_over_ground_truths(a_ , a_ , a_)
__a : List[str] = 1_0_0.0 * em / total
__a : List[str] = 1_0_0.0 * fa / total
logger.info(F"""F1: {fa:.2f}""")
logger.info(F"""EM: {em:.2f}""")
def __A ( a_ :Union[str, Any] , a_ :List[Any] , a_ :Optional[Any]) -> Optional[int]:
__a : Optional[int] = args.k
__a : Optional[int] = [line.strip() for line in open(a_ , '''r''').readlines()]
__a : Optional[Any] = [line.strip() for line in open(a_ , '''r''').readlines()]
__a : Optional[int] = 0
for hypo, reference in zip(a_ , a_):
__a : List[str] = set(hypo.split('''\t''')[:k])
__a : Dict = set(reference.split('''\t'''))
total += 1
em += len(hypo_provenance & ref_provenance) / k
__a : Tuple = 1_0_0.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""")
def __A ( a_ :Dict , a_ :Any , a_ :int) -> int:
def strip_title(a_ :List[str]):
if title.startswith('''"'''):
__a : str = title[1:]
if title.endswith('''"'''):
__a : Dict = title[:-1]
return title
__a : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device)
__a : Optional[Any] = rag_model.rag.question_encoder(a_)
__a : Dict = question_enc_outputs[0]
__a : List[str] = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
__a : Optional[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
__a : Dict = []
for docs in all_docs:
__a : Any = [strip_title(a_) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_))
return provenance_strings
def __A ( a_ :Optional[Any] , a_ :str , a_ :str) -> List[str]:
with torch.no_grad():
__a : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_)
__a : Optional[int] = inputs_dict.input_ids.to(args.device)
__a : List[Any] = inputs_dict.attention_mask.to(args.device)
__a : List[Any] = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__a : Optional[Any] = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_)
if args.print_predictions:
for q, a in zip(a_ , a_):
logger.info('''Q: {} - A: {}'''.format(a_ , a_))
return answers
def __A ( ) -> Union[str, Any]:
__a : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''')
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''')
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''')
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''')
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
__a : Optional[Any] = parser.parse_args()
__a : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
return args
def __A ( a_ :Optional[int]) -> str:
__a : Any = {}
if args.model_type is None:
__a : List[Any] = infer_model_type(args.model_name_or_path)
assert args.model_type is not None
if args.model_type.startswith('''rag'''):
__a : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
__a : Optional[int] = args.n_docs
if args.index_name is not None:
__a : Union[str, Any] = args.index_name
if args.index_path is not None:
__a : Tuple = args.index_path
else:
__a : Tuple = BartForConditionalGeneration
__a : str = (
[f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_)
__a : Optional[int] = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
__a : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path))
score_fn(a_ , args.predictions_path , args.gold_data_path)
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_))
logger.info(''' Batch size = %d''' , args.eval_batch_size)
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path))
if args.model_type.startswith('''rag'''):
__a : Optional[int] = RagRetriever.from_pretrained(a_ , **a_)
__a : Tuple = model_class.from_pretrained(a_ , retriever=a_ , **a_)
model.retriever.init_retrieval()
else:
__a : Dict = model_class.from_pretrained(a_ , **a_)
model.to(args.device)
with open(args.evaluation_set , '''r''') as eval_file, open(args.predictions_path , '''w''') as preds_file:
__a : Any = []
for line in tqdm(a_):
questions.append(line.strip())
if len(a_) == args.eval_batch_size:
__a : Tuple = evaluate_batch_fn(a_ , a_ , a_)
preds_file.write('''\n'''.join(a_) + '''\n''')
preds_file.flush()
__a : Dict = []
if len(a_) > 0:
__a : Optional[Any] = evaluate_batch_fn(a_ , a_ , a_)
preds_file.write('''\n'''.join(a_))
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path)
if __name__ == "__main__":
A = get_args()
main(args)
| 52 |
"""simple docstring"""
from __future__ import annotations
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : List[Any] = text, pattern
__a , __a : Tuple = len(_UpperCAmelCase ), len(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowerCamelCase ( self ):
# searches pattern in text and returns index positions
__a : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
__a : List[str] = self.mismatch_in_text(_UpperCAmelCase )
if mismatch_index == -1:
positions.append(_UpperCAmelCase )
else:
__a : Tuple = self.match_in_pattern(self.text[mismatch_index] )
__a : Optional[int] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
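# Worked example for the driver below: with text "ABAABA" and pattern "AB" the
# pattern matches at indices 0 and 3, so bad_character_heuristic() returns [0, 3].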
A = '''ABAABA'''
A = '''AB'''
A = BoyerMooreSearch(text, pattern)
A = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __A ( a_ :list[Any]) -> None:
create_state_space_tree(a_ , [] , 0)
def __A ( a_ :list[Any] , a_ :list[Any] , a_ :int) -> None:
if index == len(a_):
print(a_)
return
create_state_space_tree(a_ , a_ , index + 1)
current_subsequence.append(sequence[index])
create_state_space_tree(a_ , a_ , index + 1)
current_subsequence.pop()
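# The recursion makes a binary choice per element (exclude first, then include),
# so it prints all 2**n subsequences; for ['A', 'B', 'C'] that is the eight
# subsets from [] up to ['A', 'B', 'C'].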
if __name__ == "__main__":
A = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 52 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
A = F'https://www.google.com/search?q={query}&num=100'
A = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
A = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
A = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
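# Two result layouts are handled above: the "yuRUbf" container carries a direct
# href, while the fallback "kCrYT" layout appears to wrap the target URL inside
# the href's query string, which parse_qs unpacks before opening the browser.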
| 52 | 1 |