| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 82 to 53.2k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('''4.31.0''')
a : Any = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__SCREAMING_SNAKE_CASE = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def A ( self : Any ):
"""simple docstring"""
if self.train_file is not None:
__snake_case = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self : List[Any] , a_ : str ):
"""simple docstring"""
__snake_case = "label" if "label" in features[0].keys() else "labels"
__snake_case = [feature.pop(a_ ) for feature in features]
__snake_case = len(a_ )
__snake_case = len(features[0]["input_ids"] )
__snake_case = [
[{k: v[i] for k, v in feature.items()} for i in range(a_ )] for feature in features
]
__snake_case = list(chain(*a_ ) )
__snake_case = self.tokenizer.pad(
a_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
__snake_case = {k: v.view(a_ , a_ , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case = torch.tensor(a_ , dtype=torch.intaa )
return batch
def __UpperCAmelCase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , _UpperCAmelCase , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case = {}
if data_args.train_file is not None:
__snake_case = data_args.train_file
if data_args.validation_file is not None:
__snake_case = data_args.validation_file
__snake_case = data_args.train_file.split("." )[-1]
__snake_case = load_dataset(
_UpperCAmelCase , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case = [F'''ending{i}''' for i in range(4 )]
__snake_case = "sent1"
__snake_case = "sent2"
if data_args.max_seq_length is None:
__snake_case = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
__snake_case = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
__snake_case = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_UpperCAmelCase : Union[str, Any] ):
__snake_case = [[context] * 4 for context in examples[context_name]]
__snake_case = examples[question_header_name]
__snake_case = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(_UpperCAmelCase )
]
# Flatten out
__snake_case = list(chain(*_UpperCAmelCase ) )
__snake_case = list(chain(*_UpperCAmelCase ) )
# Tokenize
__snake_case = tokenizer(
_UpperCAmelCase , _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
__snake_case = raw_datasets["train"]
if data_args.max_train_samples is not None:
__snake_case = min(len(_UpperCAmelCase ) , data_args.max_train_samples )
__snake_case = train_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
__snake_case = train_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
__snake_case = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
__snake_case = min(len(_UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case = eval_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
__snake_case = eval_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_UpperCAmelCase : Dict ):
__snake_case , __snake_case = eval_predictions
__snake_case = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case = None
if training_args.resume_from_checkpoint is not None:
__snake_case = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case = last_checkpoint
__snake_case = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case = train_result.metrics
__snake_case = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
__snake_case = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics("train" , _UpperCAmelCase )
trainer.save_metrics("train" , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__snake_case = trainer.evaluate()
__snake_case = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
__snake_case = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics("eval" , _UpperCAmelCase )
trainer.save_metrics("eval" , _UpperCAmelCase )
__snake_case = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 69 |
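The multiple-choice data collator in the script above flattens the four answer candidates of every example into one batch, pads once, then reshapes the padded tensors back to (batch_size, num_choices, seq_len) and re-attaches the labels. Below is a minimal sketch of that flatten/pad/un-flatten pattern; the variable names are made up for illustration and a plain list-based padding step stands in for `tokenizer.pad`.

```python
# Sketch of the flatten -> pad -> un-flatten pattern used by the collator above
# (hypothetical names; simplified padding instead of tokenizer.pad).
from itertools import chain

import torch

features = [  # two examples, each with four tokenized candidate endings
    {"input_ids": [[1, 2, 3], [1, 2], [1, 2, 3, 4], [1]], "label": 2},
    {"input_ids": [[5, 6], [5, 6, 7], [5], [5, 6, 7, 8]], "label": 0},
]

labels = [f.pop("label") for f in features]
batch_size = len(features)
num_choices = len(features[0]["input_ids"])

# Flatten: one dict per (example, choice) pair, chained into a single list.
flattened = list(
    chain(*[[{k: v[i] for k, v in f.items()} for i in range(num_choices)] for f in features])
)

# Pad every candidate to the longest sequence in the flattened batch.
max_len = max(len(x["input_ids"]) for x in flattened)
padded = [x["input_ids"] + [0] * (max_len - len(x["input_ids"])) for x in flattened]

# Un-flatten back to (batch_size, num_choices, seq_len) and add the labels back.
batch = {
    "input_ids": torch.tensor(padded).view(batch_size, num_choices, -1),
    "labels": torch.tensor(labels),
}
print(batch["input_ids"].shape)  # torch.Size([2, 4, 4])
```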
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__snake_case = gray_code_sequence_string(_UpperCAmelCase )
# convert them to integers
for i in range(len(_UpperCAmelCase ) ):
__snake_case = int(sequence[i] , 2 )
return sequence
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case = gray_code_sequence_string(bit_count - 1 )
__snake_case = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case = "0" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case = "1" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | 1 |
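The gray-code sample above uses the reflect-and-prefix construction: the n-bit sequence is the (n-1)-bit sequence with "0" prefixed, followed by the reversed (n-1)-bit sequence with "1" prefixed. A compact, self-contained sketch with readable (made-up) names, plus the expected output for two bits:

```python
def gray_code_strings(bit_count: int) -> list:
    # reflect-and-prefix: G(n) = "0" + G(n-1), then "1" + reversed(G(n-1))
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    smaller = gray_code_strings(bit_count - 1)
    return ["0" + s for s in smaller] + ["1" + s for s in reversed(smaller)]


print(gray_code_strings(2))                       # ['00', '01', '11', '10']
print([int(s, 2) for s in gray_code_strings(2)])  # [0, 1, 3, 2]
```

Consecutive entries differ in exactly one bit, which is the defining property of a Gray code.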
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def a_ (_lowerCAmelCase : Any )-> Optional[int]:
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def a_ ()-> Tuple:
snake_case: Union[str, Any] = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=_lowerCAmelCase )
snake_case: Dict = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
TestCommand.register_subcommand(_lowerCAmelCase )
RunBeamCommand.register_subcommand(_lowerCAmelCase )
DummyDataCommand.register_subcommand(_lowerCAmelCase )
# Parse args
snake_case: Optional[int] = parser.parse_known_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
snake_case: int = parse_unknown_args(_lowerCAmelCase )
# Run
snake_case: Tuple = args.func(_lowerCAmelCase , **_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
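The entry point above follows the usual argparse sub-command layout: every command class registers its own sub-parser and attaches a factory, and `main()` dispatches on `args.func`, printing help when no command was given. A small, self-contained sketch of that layout with a hypothetical command (not part of the `datasets` package):

```python
from argparse import ArgumentParser


class GreetCommand:
    """Toy command used only to illustrate the registration pattern."""

    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("greet", help="print a greeting")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=lambda args: GreetCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")


def main(argv=None):
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo-cli command helpers")
    GreetCommand.register_subcommand(subparsers)
    args = parser.parse_args(argv)
    if not hasattr(args, "func"):
        parser.print_help()
        return
    args.func(args).run()


main(["greet", "--name", "datasets"])  # prints: hello, datasets
```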
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase ( __snake_case ):
__lowerCamelCase = 'funnel'
__lowerCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , __lowerCamelCase=3_05_22 , __lowerCamelCase=[4, 4, 4] , __lowerCamelCase=None , __lowerCamelCase=2 , __lowerCamelCase=7_68 , __lowerCamelCase=12 , __lowerCamelCase=64 , __lowerCamelCase=30_72 , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=None , __lowerCamelCase=1e-9 , __lowerCamelCase="mean" , __lowerCamelCase="relative_shift" , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , **__lowerCamelCase , ) -> Optional[int]:
'''simple docstring'''
snake_case: int = vocab_size
snake_case: List[str] = block_sizes
snake_case: str = [1] * len(__lowerCamelCase ) if block_repeats is None else block_repeats
assert len(__lowerCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
snake_case: Any = num_decoder_layers
snake_case: List[str] = d_model
snake_case: Any = n_head
snake_case: str = d_head
snake_case: Optional[Any] = d_inner
snake_case: Dict = hidden_act
snake_case: Tuple = hidden_dropout
snake_case: Optional[Any] = attention_dropout
snake_case: Optional[int] = activation_dropout
snake_case: Union[str, Any] = initializer_range
snake_case: Tuple = initializer_std
snake_case: Optional[int] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
snake_case: str = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
snake_case: List[str] = attention_type
snake_case: str = separate_cls
snake_case: Dict = truncate_seq
snake_case: List[Any] = pool_q_only
super().__init__(**__lowerCamelCase )
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 164 | 0 |
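In the Funnel configuration above, `num_hidden_layers` and `num_blocks` are read-only properties derived from `block_sizes`, which is why their setters raise `NotImplementedError`. A tiny sketch of that derived-property pattern, using a hypothetical config class rather than the real `FunnelConfig`:

```python
class BlockConfig:
    def __init__(self, block_sizes=(4, 4, 4)):
        self.block_sizes = list(block_sizes)

    @property
    def num_hidden_layers(self):
        # always derived from block_sizes, never stored separately
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("Set `block_sizes` instead of `num_hidden_layers`.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)


cfg = BlockConfig(block_sizes=[4, 4, 4])
print(cfg.num_hidden_layers, cfg.num_blocks)  # 12 3
```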
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def UpperCAmelCase_ ( snake_case__ ) -> str:
"""simple docstring"""
lowerCAmelCase__ = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def UpperCAmelCase_ ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = emb.weight.shape
lowerCAmelCase__ = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
lowerCAmelCase__ = emb.weight.data
return lin_layer
def UpperCAmelCase_ ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = torch.load(_lowercase , map_location='cpu' )
lowerCAmelCase__ = Namespace(**checkpoint['cfg']['model'] )
lowerCAmelCase__ = checkpoint['model']
remove_ignore_keys_(_lowercase )
lowerCAmelCase__ = state_dict['decoder.embed_tokens.weight'].shape[0]
lowerCAmelCase__ = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
lowerCAmelCase__ = XGLMConfig(
vocab_size=_lowercase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowerCAmelCase__ = XGLMForCausalLM(_lowercase )
lowerCAmelCase__ = model.load_state_dict(_lowercase , strict=_lowercase )
print(_lowercase )
lowerCAmelCase__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : Optional[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 193 |
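The conversion script above ties the language-model head to the token embeddings by copying the embedding matrix into a bias-free `nn.Linear`. A shape-consistent sketch of that weight-tying step (toy sizes, not the real XGLM dimensions):

```python
import torch
from torch import nn

# A (vocab_size, emb_size) embedding table reused as a Linear(emb_size -> vocab_size) head.
emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
vocab_size, emb_size = emb.weight.shape

lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # share the same weight matrix

hidden = torch.randn(2, 5, emb_size)  # (batch, seq, emb_size)
logits = lm_head(hidden)              # (batch, seq, vocab_size)
print(logits.shape)                   # torch.Size([2, 5, 100])
```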
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=1024 ) -> Union[str, Any]:
UpperCamelCase , UpperCamelCase = [], []
UpperCamelCase = list(zip(_lowercase , _lowercase ) )
UpperCamelCase , UpperCamelCase = sorted_examples[0]
def is_too_big(_lowercase ):
return tok(_lowercase , return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCamelCase = new_src + ' ' + src
UpperCamelCase = new_tgt + ' ' + tgt
if is_too_big(_lowercase ) or is_too_big(_lowercase ): # cant fit, finalize example
finished_src.append(_lowercase )
finished_tgt.append(_lowercase )
UpperCamelCase , UpperCamelCase = src, tgt
else: # can fit, keep adding
UpperCamelCase , UpperCamelCase = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_lowercase )
finished_tgt.append(_lowercase )
return finished_src, finished_tgt
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCamelCase = Path(_lowercase )
save_path.mkdir(exist_ok=_lowercase )
for split in ["train"]:
UpperCamelCase , UpperCamelCase = data_dir / F'{split}.source', data_dir / F'{split}.target'
UpperCamelCase = [x.rstrip() for x in Path(_lowercase ).open().readlines()]
UpperCamelCase = [x.rstrip() for x in Path(_lowercase ).open().readlines()]
UpperCamelCase , UpperCamelCase = pack_examples(_lowercase , _lowercase , _lowercase , _lowercase )
print(F'packed {split} split from {len(_lowercase )} examples -> {len(_lowercase )}.' )
Path(save_path / F'{split}.source' ).open('w' ).write('\n'.join(_lowercase ) )
Path(save_path / F'{split}.target' ).open('w' ).write('\n'.join(_lowercase ) )
for split in ["val", "test"]:
UpperCamelCase , UpperCamelCase = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(_lowercase , save_path / F'{split}.source' )
shutil.copyfile(_lowercase , save_path / F'{split}.target' )
def __lowerCamelCase ( ) -> Union[str, Any]:
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=_lowercase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=_lowercase , default=128 )
parser.add_argument('--data_dir' , type=_lowercase )
parser.add_argument('--save_path' , type=_lowercase )
UpperCamelCase = parser.parse_args()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_lowercase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 282 | 0 |
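The packing script above greedily concatenates consecutive source/target pairs until either side would exceed the token budget, then closes the current packed example and starts a new one. The same greedy loop in a self-contained sketch, with whitespace word counts standing in for the tokenizer length check (names and data are illustrative):

```python
def pack_examples(sources, targets, max_tokens=8):
    def is_too_big(text):
        return len(text.split()) > max_tokens  # stand-in for the tokenizer check

    finished_src, finished_tgt = [], []
    new_src, new_tgt = sources[0], targets[0]
    for src, tgt in zip(sources[1:], targets[1:]):
        cand_src, cand_tgt = new_src + " " + src, new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):
            # cannot fit: finalize the current pack and start a new one
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:
            # still fits: keep growing the pack
            new_src, new_tgt = cand_src, cand_tgt
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


src = ["a b c", "d e", "f g h i j", "k"]
tgt = ["1 2", "3", "4 5 6", "7"]
print(pack_examples(src, tgt))
# (['a b c d e', 'f g h i j k'], ['1 2 3', '4 5 6 7'])
```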
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowercase__( __UpperCamelCase: Dict ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: List[str] ,__UpperCamelCase: List[str] ):
"""simple docstring"""
return max(metric_fn(__UpperCamelCase ,__UpperCamelCase ) for gt in ground_truths )
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Any ,__UpperCamelCase: Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__UpperCamelCase ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : Tuple = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : int = pd.read_csv(__UpperCamelCase ,sep='\t' ,header=__UpperCamelCase )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : Optional[Any] = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : List[Any] = [line.strip() for line in open(__UpperCamelCase ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(__UpperCamelCase ,__UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE : Any = 1_0_0.0 * fa / total
logger.info(f"F1: {fa:.2f}" )
logger.info(f"EM: {em:.2f}" )
def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = args.k
SCREAMING_SNAKE_CASE : Dict = [line.strip() for line in open(__UpperCamelCase ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : str = [line.strip() for line in open(__UpperCamelCase ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : str = 0
for hypo, reference in zip(__UpperCamelCase ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('\t' )[:k] )
SCREAMING_SNAKE_CASE : Tuple = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Any = 1_0_0.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}" )
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: str ,__UpperCamelCase: Optional[Any] ):
"""simple docstring"""
def strip_title(__UpperCamelCase: List[Any] ):
if title.startswith('"' ):
SCREAMING_SNAKE_CASE : List[Any] = title[1:]
if title.endswith('"' ):
SCREAMING_SNAKE_CASE : List[Any] = title[:-1]
return title
SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase ,return_tensors='pt' ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,)['input_ids'].to(args.device )
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.rag.question_encoder(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : List[str] = rag_model.retriever(
__UpperCamelCase ,question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() ,prefix=rag_model.rag.generator.config.prefix ,n_docs=rag_model.config.n_docs ,return_tensors='pt' ,)
SCREAMING_SNAKE_CASE : List[str] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Any = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[str] = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: int ):
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase ,return_tensors='pt' ,padding=__UpperCamelCase ,truncation=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : List[str] = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase ,attention_mask=__UpperCamelCase ,num_beams=args.num_beams ,min_length=args.min_length ,max_length=args.max_length ,early_stopping=__UpperCamelCase ,num_return_sequences=1 ,bad_words_ids=[[0, 0]] ,)
SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase ,__UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase ,__UpperCamelCase ) )
return answers
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_type' ,choices=['rag_sequence', 'rag_token', 'bart'] ,type=__UpperCamelCase ,help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) ,)
parser.add_argument(
'--index_name' ,default=__UpperCamelCase ,choices=['exact', 'compressed', 'legacy'] ,type=__UpperCamelCase ,help='RAG model retriever type' ,)
parser.add_argument(
'--index_path' ,default=__UpperCamelCase ,type=__UpperCamelCase ,help='Path to the retrieval index' ,)
parser.add_argument('--n_docs' ,default=5 ,type=__UpperCamelCase ,help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' ,default=__UpperCamelCase ,type=__UpperCamelCase ,required=__UpperCamelCase ,help='Path to pretrained checkpoints or model identifier from huggingface.co/models' ,)
parser.add_argument(
'--eval_mode' ,choices=['e2e', 'retrieval'] ,default='e2e' ,type=__UpperCamelCase ,help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) ,)
parser.add_argument('--k' ,default=1 ,type=__UpperCamelCase ,help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' ,default=__UpperCamelCase ,type=__UpperCamelCase ,required=__UpperCamelCase ,help='Path to a file containing evaluation samples' ,)
parser.add_argument(
'--gold_data_path' ,default=__UpperCamelCase ,type=__UpperCamelCase ,required=__UpperCamelCase ,help='Path to a tab-separated file with gold samples' ,)
parser.add_argument(
'--gold_data_mode' ,default='qa' ,type=__UpperCamelCase ,choices=['qa', 'ans'] ,help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) ,)
parser.add_argument(
'--predictions_path' ,type=__UpperCamelCase ,default='predictions.txt' ,help='Name of the predictions file, to be stored in the checkpoints directory' ,)
parser.add_argument(
'--eval_all_checkpoints' ,action='store_true' ,help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' ,)
parser.add_argument(
'--eval_batch_size' ,default=8 ,type=__UpperCamelCase ,help='Batch size per GPU/CPU for evaluation.' ,)
parser.add_argument(
'--recalculate' ,help='Recalculate predictions even if the prediction file exists' ,action='store_true' ,)
parser.add_argument(
'--num_beams' ,default=4 ,type=__UpperCamelCase ,help='Number of beams to be used when generating answers' ,)
parser.add_argument('--min_length' ,default=1 ,type=__UpperCamelCase ,help='Min length of the generated answers' )
parser.add_argument('--max_length' ,default=50 ,type=__UpperCamelCase ,help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' ,action='store_true' ,help='If True, prints predictions while evaluating.' ,)
parser.add_argument(
'--print_docs' ,action='store_true' ,help='If True, prints docs retried while generating.' ,)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : Any = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def lowercase__( __UpperCamelCase: Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
SCREAMING_SNAKE_CASE : Any = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Any = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(__UpperCamelCase ,args.predictions_path ,args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) )
logger.info(' Batch size = %d' ,args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
SCREAMING_SNAKE_CASE : Tuple = RagRetriever.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase ,retriever=__UpperCamelCase ,**__UpperCamelCase )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : Dict = model_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set ,'r' ) as eval_file, open(args.predictions_path ,'w' ) as preds_file:
SCREAMING_SNAKE_CASE : Optional[Any] = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : Dict = evaluate_batch_fn(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) + '\n' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Optional[Any] = []
if len(__UpperCamelCase ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase ,args.predictions_path ,args.gold_data_path )
if __name__ == "__main__":
UpperCamelCase_ = get_args()
main(args)
| 508 |
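Two of the evaluation helpers above are worth isolating: scoring a prediction against every reference answer and keeping the best score, and precision@k over tab-separated provenance titles. A small sketch of both, using a toy exact-match metric in place of the metrics the script imports from `utils_rag`:

```python
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # score against every reference answer and keep the best
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())


def precision_at_k(hypo_line, reference_line, k):
    # both lines hold tab-separated document titles; measure top-k overlap
    hypo_provenance = set(hypo_line.split("\t")[:k])
    ref_provenance = set(reference_line.split("\t"))
    return len(hypo_provenance & ref_provenance) / k


print(metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Lyon"]))  # 1.0
print(precision_at_k("Paris\tLyon\tNice", "Paris\tMarseille", k=2))            # 0.5
```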
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''pixel_values''']
def __init__( self, A = True, A = None, A = PILImageResampling.BICUBIC, A = True, A = True, A = 1 / 255, A = None, A = True, A = None, A = None, **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : str = get_size_dict(A, default_to_square=A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BILINEAR, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(A, size=size['shortest_edge'], default_to_square=A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE : Union[str, Any] = (size['height'], size['width'])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(A, size=A, resample=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A, size=(size['height'], size['width']), data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A ):
'''simple docstring'''
return rescale(A, scale=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A, A = None, **A, ):
'''simple docstring'''
return normalize(A, mean=A, std=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = ChannelDimension.FIRST, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(A, param_name='crop_size', default_to_square=A )
SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(A )
if not is_batched(A ):
SCREAMING_SNAKE_CASE : List[Any] = [images]
if not valid_images(A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(image=A, size=A, resample=A ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[Any] = [self.center_crop(image=A, size=A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.rescale(image=A, scale=A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=A, mean=A, std=A ) for image in images]
SCREAMING_SNAKE_CASE : List[Any] = [to_channel_dimension_format(A, A ) for image in images]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=A, tensor_type=A )
| 508 | 1 |
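The image processor above chains resize, center-crop, rescale and normalize over numpy arrays. A minimal sketch of the last two steps on a channels-last uint8 image, using the usual ImageNet mean/std values (the image itself is random, purely for illustration):

```python
import numpy as np

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])

image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)

# rescale: map uint8 pixel values into [0, 1]
rescaled = image.astype(np.float32) * (1 / 255)

# normalize: per-channel (x - mean) / std, broadcast over the channel axis
normalized = (rescaled - IMAGENET_MEAN) / IMAGENET_STD

print(normalized.shape, normalized.dtype)  # (224, 224, 3) float64
```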
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
_a : Optional[int] = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def UpperCamelCase__ ( _A: Any , _A: tuple , _A: Path , _A: Tuple , _A: Optional[Any] , _A: Union[str, Any] , _A: Dict , _A: str=False , ):
'''simple docstring'''
output_path.parent.mkdir(parents=_A , exist_ok=_A )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_A , _A , f=output_path.as_posix() , input_names=_A , output_names=_A , dynamic_axes=_A , do_constant_folding=_A , use_external_data_format=_A , enable_onnx_checker=_A , opset_version=_A , )
else:
export(
_A , _A , f=output_path.as_posix() , input_names=_A , output_names=_A , dynamic_axes=_A , do_constant_folding=_A , opset_version=_A , )
@torch.no_grad()
def UpperCamelCase__ ( _A: str , _A: str , _A: int , _A: bool = False ):
'''simple docstring'''
__lowerCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowerCamelCase = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
__lowerCamelCase = """cpu"""
__lowerCamelCase = Path(_A )
# VAE DECODER
__lowerCamelCase = AutoencoderKL.from_pretrained(model_path + """/vae""" )
__lowerCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__lowerCamelCase = vae_decoder.decode
onnx_export(
_A , model_args=(
torch.randn(1 , _A , 25 , 25 ).to(device=_A , dtype=_A ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=_A , )
del vae_decoder
if __name__ == "__main__":
_a : str = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
_a : Dict = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX')
| 479 |
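The export helper above is a thin wrapper around `torch.onnx.export`; the version check exists because, as the script notes, the `enable_onnx_checker` and `use_external_data_format` arguments were deprecated in torch 1.11. For reference, a minimal export of a toy module with the same kind of arguments (names and sizes are made up):

```python
from pathlib import Path

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2)).eval()
dummy_input = torch.randn(1, 4)

output_path = Path("toy_model.onnx")
torch.onnx.export(
    model,
    (dummy_input,),
    f=output_path.as_posix(),
    input_names=["sample"],
    output_names=["logits"],
    dynamic_axes={"sample": {0: "batch"}},  # allow any batch size at run time
    do_constant_folding=True,
    opset_version=14,
)
print(output_path.exists())  # True
```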
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def UpperCamelCase__ ( _A: Tuple ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def UpperCamelCase__ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def UpperCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """mock-s3-bucket"""
__lowerCamelCase = f'''s3://{mock_bucket}'''
__lowerCamelCase = extract_path_from_uri(_A )
assert dataset_path.startswith("""s3://""" ) is False
__lowerCamelCase = """./local/path"""
__lowerCamelCase = extract_path_from_uri(_A )
assert dataset_path == new_dataset_path
def UpperCamelCase__ ( _A: List[Any] ):
'''simple docstring'''
__lowerCamelCase = is_remote_filesystem(_A )
assert is_remote is True
__lowerCamelCase = fsspec.filesystem("""file""" )
__lowerCamelCase = is_remote_filesystem(_A )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , _A )
def UpperCamelCase__ ( _A: List[str] , _A: Tuple , _A: List[Any] , _A: Any , _A: List[Any] , _A: Optional[int] , _A: List[str] ):
'''simple docstring'''
__lowerCamelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
__lowerCamelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
__lowerCamelCase = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
__lowerCamelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_A )
assert isinstance(_A , _A )
__lowerCamelCase = os.path.basename(_A )
__lowerCamelCase = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(_A , """r""" , encoding="""utf-8""" ) as f, open(_A , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def UpperCamelCase__ ( _A: Optional[Any] , _A: Union[str, Any] , _A: int ):
'''simple docstring'''
__lowerCamelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
__lowerCamelCase = compressed_file_paths[protocol]
__lowerCamelCase = """dataset.jsonl"""
__lowerCamelCase = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
__lowerCamelCase , *__lowerCamelCase = fsspec.get_fs_token_paths(_A )
assert fs.isfile(_A )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def UpperCamelCase__ ( _A: str , _A: str , _A: Optional[int] , _A: Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = hf_api.dataset_info(_A , token=_A )
__lowerCamelCase = HfFileSystem(repo_info=_A , token=_A )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(_A ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def UpperCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_A , _A , clobber=_A )
with pytest.warns(_A ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_A ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 479 | 1 |
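The tests above exercise fsspec's protocol registry and its compression filesystems (gzip, bz2, zstd, lz4, xz), including chained URLs such as `zip://member::archive`. A short sketch of the simplest case, reading a gzip-compressed text file through `fsspec.open` (the file name is made up):

```python
import gzip

import fsspec

# create a small gzip file, then read it back through fsspec's compression layer
with gzip.open("dataset.txt.gz", "wt", encoding="utf-8") as f:
    f.write("hello fsspec")

with fsspec.open("dataset.txt.gz", "rt", compression="gzip", encoding="utf-8") as f:
    print(f.read())  # hello fsspec
```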
from math import factorial
def UpperCamelCase_( _A :int = 1_00 )-> int:
return sum(map(_A , str(factorial(_A ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 185 |
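A quick worked case for the digit-sum-of-a-factorial snippet above: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; for the default input of 100 the sum is 648 (Project Euler problem 20).

```python
from math import factorial

print(factorial(10))                             # 3628800
print(sum(int(d) for d in str(factorial(10))))   # 27
print(sum(int(d) for d in str(factorial(100))))  # 648
```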
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def snake_case__ ( snake_case ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self ):
'''simple docstring'''
raise NotImplementedError()
| 185 | 1 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1_6000 ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : int = int(round(sample_rate * max_length ) )
if len(UpperCamelCase ) <= sample_length:
return wav
__UpperCAmelCase : int = randint(0 , len(UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class a__ :
lowercase_ = field(default=__magic_name__ , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "A file containing the training audio paths and labels."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "A file containing the validation audio paths and labels."} )
lowercase_ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase_ = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowercase_ = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
lowercase_ = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
lowercase_ = field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase_ = field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowercase_ = field(
default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class a__ :
lowercase_ = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
lowercase_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Name or path of preprocessor config."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
lowercase_ = field(
default=__magic_name__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def a_ ( self : Optional[int]):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , UpperCamelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`.")
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
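    # Note: training uses `train_transforms` (random subsampling to `max_length_seconds`) so that long
    # clips stay cheap to batch, while evaluation uses `val_transforms` on the full, untruncated
    # waveform so metrics are computed on the complete audio.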
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
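    # e.g. for a two-class dataset with label names ["no", "yes"] this yields
    #   label2id = {"no": "0", "yes": "1"} and id2label = {"0": "no", "1": "yes"}
    # (string ids, matching how the mappings are serialized into the model config).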
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 77 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
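# With the `sys.modules[__name__] = _LazyModule(...)` replacement above, the framework-specific
# submodules are only imported on first attribute access; e.g. `from transformers.models.lxmert
# import LxmertModel` does not pull in torch until `LxmertModel` is actually looked up.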
| 648 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )
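        # Worked example with the defaults above (image_size=64, kernel_size=3, stride=2, padding=1):
        # four stride-2 convs give 64 -> 32 -> 16 -> 8 -> 4, and the two "Subsample" stages shrink the
        # 4x4 token grid by another factor of 4, hence the ceil(height / 4) * ceil(width / 4) above.
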
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def __A ( self ):
def check_hidden_states_output(A__ , A__ , A__ ):
A__ : str = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
A__ : List[Any] = model(**self._prepare_for_class(A__ , A__ ) )
A__ : List[Any] = outputs.hidden_states
A__ : int = len(self.model_tester.depths ) + 1
self.assertEqual(len(A__ ) , A__ )
A__ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
A__ : List[str] = image_size[0], image_size[1]
for _ in range(4 ):
A__ : str = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
A__ : List[str] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Union[str, Any] = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Dict = True
check_hidden_states_output(A__ , A__ , A__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __A ( self ):
pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def __A ( self ):
if not self.model_tester.is_training:
return
A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
A__ : Dict = model_class(A__ )
model.to(A__ )
model.train()
A__ : Union[str, Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
A__ : List[Any] = model(**A__ ).loss
loss.backward()
def __A ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A__ : Dict = False
A__ : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
A__ : List[Any] = model_class(A__ )
model.gradient_checkpointing_enable()
model.to(A__ )
model.train()
A__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
A__ : Any = model(**A__ ).loss
loss.backward()
def __A ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Union[str, Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
A__ : Any = problem_type["""title"""]
A__ : Any = problem_type["""num_labels"""]
A__ : List[Any] = model_class(A__ )
model.to(A__ )
model.train()
A__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
if problem_type["num_labels"] > 1:
A__ : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A__ : Dict = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A__ ) as warning_list:
A__ : Dict = model(**A__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
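

# Illustrative usage sketch (not part of the test suite): running the same checkpoint outside the
# test harness. The checkpoint name comes from LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST above; everything
# else is standard image-classification inference and requires downloading the model.
if __name__ == "__main__":
    from PIL import Image
    import torch
    from transformers import LevitForImageClassification, LevitImageProcessor

    processor = LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    model = LevitForImageClassification.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[int(logits.argmax(-1))])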
| 701 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
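

# Illustrative usage sketch (assumption: this module lives inside the `diffusers` package, since the
# relative `from .. import __version__` above only resolves there). Typical call patterns:
#
#   # 1) Deprecate a keyword argument still accepted via **kwargs: pops "scale" out of `kwargs`,
#   #    emits a FutureWarning, and returns the popped value.
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
#
#   # 2) Plain deprecation notice with no value to recover:
#   deprecate("set_format", "1.0.0", "Use `VaeImageProcessor` instead.", standard_warn=False)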
| 64 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(__a)
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(__a)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
_lowerCAmelCase : List[str] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(__a, __a)
# Checks everything loads correctly in the same way
_lowerCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(__a)
_lowerCAmelCase : str = tokenizer_p.from_pretrained(__a)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a, __a))
shutil.rmtree(__a)
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : List[str] = tokenizer_r.save_pretrained(__a, legacy_format=__a)
_lowerCAmelCase : Optional[Any] = tokenizer_p.save_pretrained(__a)
# Checks it save with the same files
self.assertSequenceEqual(__a, __a)
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(__a)
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(__a)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a, __a))
shutil.rmtree(__a)
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(__a, legacy_format=__a)
_lowerCAmelCase : Dict = tokenizer_p.save_pretrained(__a)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(__a)
_lowerCAmelCase : Dict = tokenizer_p.from_pretrained(__a)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a, __a))
shutil.rmtree(__a)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
if not self.test_seqaseq:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
_lowerCAmelCase : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
_lowerCAmelCase : Optional[int] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
_lowerCAmelCase : Optional[int] = tokenizer.prepare_seqaseq_batch(
src_texts=__a, tgt_texts=__a, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn", )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 10)
# max_target_length will default to max_length if not specified
_lowerCAmelCase : Dict = tokenizer.prepare_seqaseq_batch(
__a, tgt_texts=__a, max_length=3, return_tensors="pt")
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 3)
_lowerCAmelCase : List[Any] = tokenizer.prepare_seqaseq_batch(
src_texts=__a, max_length=3, max_target_length=10, return_tensors="pt")
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
self.assertNotIn("decoder_input_ids", __a)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Tuple = [AddedToken("<special>", lstrip=__a)]
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a, additional_special_tokens=__a, **__a)
_lowerCAmelCase : Tuple = tokenizer_r.encode("Hey this is a <special> token")
_lowerCAmelCase : Tuple = tokenizer_r.encode("<special>", add_special_tokens=__a)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
_lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__a, additional_special_tokens=__a, **__a, )
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
__a, additional_special_tokens=__a, **__a)
_lowerCAmelCase : Any = tokenizer_p.encode("Hey this is a <special> token")
_lowerCAmelCase : List[Any] = tokenizer_cr.encode("Hey this is a <special> token")
self.assertEqual(__a, __a)
self.assertEqual(__a, __a)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn")
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 25_6001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 25_6002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 25_6057)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, __a)
def snake_case__ ( self):
'''simple docstring'''
self.assertIn(__a, self.tokenizer.all_special_ids)
# fmt: off
_lowerCAmelCase : Tuple = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
_lowerCAmelCase : Union[str, Any] = self.tokenizer.decode(__a, skip_special_tokens=__a)
_lowerCAmelCase : int = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=__a)
self.assertEqual(__a, __a)
self.assertNotIn(self.tokenizer.eos_token, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], __a)
_lowerCAmelCase : Any = 10
_lowerCAmelCase : Optional[Any] = self.tokenizer(__a, max_length=__a, truncation=__a).input_ids[0]
self.assertEqual(ids[-1], 2)
self.assertEqual(ids[0], __a)
self.assertEqual(len(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_6203, 3])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = tempfile.mkdtemp()
_lowerCAmelCase : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a)
_lowerCAmelCase : Dict = NllbTokenizer.from_pretrained(__a)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, __a)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=__a, truncation=__a, max_length=len(self.expected_src_tokens), return_tensors="pt", )
_lowerCAmelCase : str = shift_tokens_right(
batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"])
self.assertIsInstance(__a, __a)
self.assertEqual((2, 15), batch.input_ids.shape)
self.assertEqual((2, 15), batch.attention_mask.shape)
_lowerCAmelCase : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, __a)
self.assertEqual(__a, batch.decoder_input_ids[0, 0]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text, padding=__a, truncation=__a, max_length=3, return_tensors="pt")
_lowerCAmelCase : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text, padding=__a, truncation=__a, max_length=10, return_tensors="pt")
_lowerCAmelCase : str = targets["input_ids"]
_lowerCAmelCase : Any = shift_tokens_right(
__a, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn")
self.assertEqual(
nested_simplify(__a), {
# A, test, EOS, en_XX
"input_ids": [[25_6047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_6057,
}, )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
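

# Illustrative usage sketch (not part of the test suite): typical NLLB translation tokenization,
# mirroring what the integration tests above exercise. The checkpoint and language codes are the
# ones used in the tests; running this requires downloading the model.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.convert_tokens_to_ids("ron_Latn"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])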
| 500 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # rightmost position of `char` in the pattern, or -1 if it does not occur
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # text index of the rightmost mismatch for the window starting at `current_pos`,
        # or -1 if the whole pattern matches there
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # finds every position at which the pattern occurs in the text
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
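

# Optional refinement (not part of the original class): the classic bad-character rule precomputes
# the rightmost occurrence of every pattern character once, instead of rescanning the pattern on
# each mismatch as `match_in_pattern` does. A minimal sketch:
def last_occurrence_table(pattern: str) -> dict[str, int]:
    """Map each character of `pattern` to its rightmost index."""
    return {char: idx for idx, char in enumerate(pattern)}
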
_snake_case = "ABAABA"
_snake_case = "AB"
_snake_case = BoyerMooreSearch(text, pattern)
_snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
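    # For the inputs above this prints: [0, 3]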
| 500 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures/vocab.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures''')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[int] = WavaVecaConfig()
_lowerCamelCase : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''vocab.json''' ) )
_lowerCamelCase : Optional[int] = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Tuple = WavaVecaFeatureExtractor()
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
_lowerCamelCase : Dict = WavaVecaProcessor(__lowerCAmelCase , __lowerCAmelCase )
# save in new folder
processor.save_pretrained(__lowerCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''r''' ) as f:
_lowerCamelCase : Optional[int] = json.load(__lowerCAmelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''w''' ) as f:
f.write(json.dumps(__lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[str] = WavaVecaFeatureExtractor()
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
_lowerCamelCase : Dict = WavaVecaProcessor(__lowerCAmelCase , __lowerCAmelCase )
# save in new folder
processor.save_pretrained(__lowerCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''r''' ) as f:
_lowerCamelCase : int = json.load(__lowerCAmelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''w''' ) as f:
f.write(json.dumps(__lowerCAmelCase ) )
_lowerCamelCase : Any = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Dict = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(__lowerCAmelCase )
# copy relevant files
copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''w''' ) as f:
f.write('''{}''' )
_lowerCamelCase : int = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCAmelCase )
_lowerCamelCase : Any = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
_lowerCamelCase : str = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
_lowerCamelCase : Dict = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
_lowerCamelCase : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
_lowerCamelCase : str = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : int = os.path.join(__lowerCAmelCase , '''vocab.txt''' )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_lowerCamelCase : int = CustomTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = CustomProcessor(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoProcessor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
class __snake_case ( _lowercase):
snake_case__ : Optional[Any] = False
class __snake_case ( _lowercase):
snake_case__ : List[str] = False
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "AutoFeatureExtractor"
snake_case__ : Tuple = "AutoTokenizer"
snake_case__ : Dict = False
try:
AutoConfig.register('''custom''' , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase )
# If remote code is not set, the default is to use local classes.
_lowerCamelCase : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_lowerCamelCase : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_lowerCamelCase : int = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = WavaVecaProcessor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowerCAmelCase , '''test-processor''' ) , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : str = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(new_processor.feature_extractor , __lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = WavaVecaProcessor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowerCAmelCase , '''test-processor-org''' ) , push_to_hub=__lowerCAmelCase , use_auth_token=self._token , organization='''valid_org''' , )
_lowerCamelCase : str = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(new_processor.feature_extractor , __lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_lowerCamelCase : Tuple = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : int = os.path.join(__lowerCAmelCase , '''vocab.txt''' )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_lowerCamelCase : str = CustomTokenizer(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = CustomProcessor(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
_lowerCamelCase : List[str] = Repository(__lowerCAmelCase , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__lowerCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) ) as f:
_lowerCamelCase : Tuple = json.load(__lowerCAmelCase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
_lowerCamelCase : List[str] = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__lowerCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 598 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
@parameterized.expand([(None,), ('''foo.json''',)] )
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoConfig.from_pretrained('''gpt2''' )
_lowerCamelCase : List[Any] = GenerationConfig.from_model_config(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = GenerationConfig()
_lowerCamelCase : Any = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
_lowerCamelCase : Optional[Any] = copy.deepcopy(__lowerCAmelCase )
_lowerCamelCase : List[str] = generation_config.update(**__lowerCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCAmelCase , {'''foo''': '''bar'''} )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = GenerationConfig()
_lowerCamelCase : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(__lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
_lowerCamelCase : Any = GenerationConfig.from_model_config(__lowerCAmelCase )
assert not hasattr(__lowerCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
_lowerCamelCase : int = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(__lowerCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id='''test-generation-config''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : str = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
| 598 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class A ( UpperCamelCase_ ):
UpperCamelCase__ : Optional[int] ='speech_to_text_2'
UpperCamelCase__ : int =['past_key_values']
UpperCamelCase__ : Tuple ={'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Any , lowercase_ : str=1_0000 , lowercase_ : Dict=6 , lowercase_ : List[str]=2048 , lowercase_ : Any=4 , lowercase_ : List[Any]=0.0 , lowercase_ : List[str]=True , lowercase_ : int="relu" , lowercase_ : Tuple=256 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[str]=0.02 , lowercase_ : Any=2 , lowercase_ : Any=True , lowercase_ : int=1 , lowercase_ : List[str]=0 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]=1024 , **lowercase_ : int , ) -> int:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =vocab_size
_lowerCamelCase : Tuple =d_model
_lowerCamelCase : Optional[int] =decoder_ffn_dim
_lowerCamelCase : Optional[int] =decoder_layers
_lowerCamelCase : List[Any] =decoder_attention_heads
_lowerCamelCase : str =dropout
_lowerCamelCase : Dict =attention_dropout
_lowerCamelCase : Dict =activation_dropout
_lowerCamelCase : int =activation_function
_lowerCamelCase : int =init_std
_lowerCamelCase : Dict =decoder_layerdrop
_lowerCamelCase : List[str] =use_cache
_lowerCamelCase : List[str] =decoder_layers
_lowerCamelCase : str =scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase : Dict =max_target_positions
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 464 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , lowercase_ : int ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] =3
_lowerCamelCase : Dict =250
_lowerCamelCase : Tuple =ids_tensor((batch_size, length) , lowercase_ )
_lowerCamelCase : str =torch.ones((batch_size, length) , device=lowercase_ , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] =self._get_tensors(5 )
_lowerCamelCase : Dict =StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase , _lowerCamelCase : List[str] =self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase , _lowerCamelCase : Optional[int] =self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def lowerCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Any =MaxLengthCriteria(max_length=10 )
_lowerCamelCase , _lowerCamelCase : List[str] =self._get_tensors(5 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase , _lowerCamelCase : Optional[int] =self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase , _lowerCamelCase : Optional[int] =self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_lowerCamelCase , _lowerCamelCase : str =self._get_tensors(5 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] =self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase , _lowerCamelCase : Dict =self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
_lowerCamelCase : Optional[Any] =StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowerCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] =self._get_tensors(5 )
_lowerCamelCase : Tuple =MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
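        # Starting the clock 0.2s in the past means the 0.1s budget is already spent,
        # so the criterion below reports that generation should stop.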
_lowerCamelCase : int =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowercase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_lowerCamelCase : Optional[Any] =validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowercase_ ) , 1 )
| 464 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = projection_dim
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowerCamelCase__ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 721 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax the edges leaving ``v`` for one search direction."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the other search has already settled `nxt`, try to improve the meeting point.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra: search from the source and the destination at once."""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # The two frontiers have met; no shorter connection is possible.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
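# Usage sketch (not part of the original module): with the sample graphs above, the
# cheapest route from "E" to "F" is E -> G -> F with total weight 3.
def _example_query() -> int:
    return bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # expected: 3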
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , _A : List[str] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = parent
__SCREAMING_SNAKE_CASE : int = 13
__SCREAMING_SNAKE_CASE : Any = 7
__SCREAMING_SNAKE_CASE : List[str] = 30
__SCREAMING_SNAKE_CASE : List[Any] = self.seq_length + self.mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = 15
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = 99
__SCREAMING_SNAKE_CASE : Optional[Any] = [10, 50, 80]
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : Dict = 4
__SCREAMING_SNAKE_CASE : Union[str, Any] = 8
__SCREAMING_SNAKE_CASE : Union[str, Any] = 128
__SCREAMING_SNAKE_CASE : Optional[int] = 2
__SCREAMING_SNAKE_CASE : Optional[Any] = 2
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : int = 3
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
__SCREAMING_SNAKE_CASE : Optional[Any] = 0.01
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCAmelCase__ ( self : List[str] , _A : Dict , _A : str , _A : Any , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = TFTransfoXLModel(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = model(_A ).to_tuple()
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids_a, '''mems''': mems_a}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ ( self : Dict , _A : int , _A : List[str] , _A : int , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = TFTransfoXLLMHeadModel(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple()
__SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = model(_A ).to_tuple()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = model([input_ids_a, mems_a] ).to_tuple()
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = model(_A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ ( self : Dict , _A : Tuple , _A : int , _A : Dict , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = TFTransfoXLForSequenceClassification(_A )
__SCREAMING_SNAKE_CASE : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase_ = () if is_tf_available() else ()
lowerCAmelCase_ = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase__ ( self : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFTransfoXLModelTester(self )
__SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=_A , d_embed=37 )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
self.model_tester.set_seed()
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_A )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.model_tester.set_seed()
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[int] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Dict = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
__SCREAMING_SNAKE_CASE : List[Any] = model.get_output_embeddings()
assert isinstance(_A , tf.keras.layers.Layer )
__SCREAMING_SNAKE_CASE : Any = model.get_bias()
assert name is None
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = model.get_output_embeddings()
assert x is None
__SCREAMING_SNAKE_CASE : Dict = model.get_bias()
assert name is None
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = TFTransfoXLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__SCREAMING_SNAKE_CASE : List[Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__SCREAMING_SNAKE_CASE : Optional[int] = model.generate(_A , max_length=200 , do_sample=_A )
self.assertListEqual(output_ids[0].numpy().tolist() , _A )
| 74 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = 42
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ):
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE : Dict = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim
__SCREAMING_SNAKE_CASE : Tuple = in_channels
__SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A )
# 3. Define transformers blocks
__SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , )
for d in range(_A )
] )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A )
def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape
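        # 1. Input: hidden states arrive flattened as (batch * frames, channel, height, width);
        # recover the batch dimension before running the temporal attention blocks below.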
__SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames
__SCREAMING_SNAKE_CASE : Dict = hidden_states
__SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A )
__SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A )
__SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A )
__SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A )
# 2. Blocks
for block in self.transformer_blocks:
__SCREAMING_SNAKE_CASE : Optional[Any] = block(
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , )
# 3. Output
__SCREAMING_SNAKE_CASE : Any = self.proj_out(_A )
__SCREAMING_SNAKE_CASE : List[str] = (
hidden_states[None, None, :]
.reshape(_A , _A , _A , _A , _A )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_A )
| 74 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _a ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "dinat"
__SCREAMING_SNAKE_CASE = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowerCAmelCase_=4 , lowerCAmelCase_=3 , lowerCAmelCase_=64 , lowerCAmelCase_=[3, 4, 6, 5] , lowerCAmelCase_=[2, 4, 8, 16] , lowerCAmelCase_=7 , lowerCAmelCase_=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCAmelCase_=3.0 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-5 , lowerCAmelCase_=0.0 , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
super().__init__(**_snake_case )
_lowercase =patch_size
_lowercase =num_channels
_lowercase =embed_dim
_lowercase =depths
_lowercase =len(_snake_case )
_lowercase =num_heads
_lowercase =kernel_size
_lowercase =dilations
_lowercase =mlp_ratio
_lowercase =qkv_bias
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =drop_path_rate
_lowercase =hidden_act
_lowercase =layer_norm_eps
_lowercase =initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowercase =int(embed_dim * 2 ** (len(_snake_case ) - 1) )
_lowercase =layer_scale_init_value
_lowercase =["stem"] + [F'''stage{idx}''' for idx in range(1 , len(_snake_case ) + 1 )]
_lowercase , _lowercase =get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
| 708 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 594 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''T5Config'''
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right and prepend the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # Replace possible -100 values (ignored label positions) with `pad_token_id`.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
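# Hedged illustration (not in the original file): for a single sequence [[5, 6, 7]] with
# decoder_start_token_id=0, the decoder input becomes [[0, 5, 6]].
def _shift_example() -> jnp.ndarray:
    return shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=0)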
class FlaxMTaModel(FlaxTaModel):
    r"""mT5 model: a T5 model body configured through an mT5 config."""
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    r"""Encoder-only variant of the mT5 model."""
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    r"""mT5 model with a language-modeling head for conditional generation."""
    model_type = "mt5"
    config_class = MTaConfig
| 9 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : List[str] ={
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any =[
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : int =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 716 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes that lazily yields the primes 2, 3, 5, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: move its recorded factor on to the next multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: schedule its square as the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least (odd) n for which 2 * p_n * n exceeds ``limit``, p_n being the n-th prime."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
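# A small sanity-check sketch (not part of the original solution): the incremental sieve
# above yields the primes in increasing order.
def _first_primes(count: int) -> list[int]:
    gen = sieve()
    return [next(gen) for _ in range(count)]  # _first_primes(5) -> [2, 3, 5, 7, 11]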
if __name__ == "__main__":
print(solution())
| 222 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ : List[str] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take `color` if none of its already-colored neighbours uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid colouring using at most `max_colors` colours, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
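# Usage sketch (not part of the original module): a triangle 0-1-2 with a pendant vertex 3
# attached to vertex 0 needs three colours; one valid assignment is [0, 1, 2, 1].
def _example_coloring() -> list[int]:
    adjacency = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 0],
        [1, 0, 0, 0],
    ]
    return color(adjacency, 3)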
| 304 | 1 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the plaintext candidate for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
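# Usage sketch (not part of the original script): "KHOOR" is "HELLO" shifted by 3, so the
# line printed for key #3 reads "HELLO".
def _example() -> None:
    decrypt("KHOOR")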
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 676 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
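# A minimal sanity-check sketch (not in the original module): sigmoid(0) is 0.5, so the
# SiLU value at 0 is 0.0, and large positive inputs pass through almost unchanged.
def _example_values() -> np.ndarray:
    return sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))  # ~[-0.2689, 0.0, 0.7311]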
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[str] = ["""MaskFormerFeatureExtractor"""]
snake_case : Optional[int] = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
snake_case : Optional[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    snake_case : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 545 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Count values of n below ``limit`` for which x**2 - y**2 - z**2 == n has exactly ten
    positive solutions with x, y, z consecutive terms of an arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
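# Hedged worked identity behind the loop above (not in the original solution): with
# x = a + d, y = a, z = a - d one gets x**2 - y**2 - z**2 == a * (4*d - a), so for a
# divisor a of n the common difference must equal (a + n / a) / 4 and be an integer.
def _check_identity(a: int, d: int) -> bool:
    x, y, z = a + d, a, a - d
    return x**2 - y**2 - z**2 == a * (4 * d - a)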
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 545 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCamelCase , cache_dir=lowerCamelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase , os.listdir(lowerCamelCase )[0] , """snapshots""" ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : str ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCamelCase )
_UpperCAmelCase = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = jax.random.split(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase ) == num_samples
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=lowerCamelCase )
_UpperCAmelCase = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = jax.random.split(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase )
_UpperCAmelCase = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = jax.random.split(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def lowerCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
_UpperCAmelCase = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = jax.random.split(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def lowerCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = jax.random.split(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase , )
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase , use_memory_efficient_attention=lowerCamelCase , )
_UpperCAmelCase = replicate(lowerCamelCase )
_UpperCAmelCase = pipeline.prepare_inputs(lowerCamelCase )
_UpperCAmelCase = shard(lowerCamelCase )
_UpperCAmelCase = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2 | 402 |
from collections import deque
from math import floor
from random import random
from time import time
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
def lowerCamelCase ( self : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Any=1 ) -> int:
"""simple docstring"""
if self.graph.get(lowerCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase = [[w, v]]
if not self.graph.get(lowerCamelCase ):
_UpperCAmelCase = []
def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return list(self.graph )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
if self.graph.get(lowerCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : List[str]=-2 , lowerCamelCase : List[str]=-1 ) -> Any:
"""simple docstring"""
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return visited
def lowerCamelCase ( self : Any , lowerCamelCase : Optional[int]=-1 ) -> int:
"""simple docstring"""
if c == -1:
_UpperCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(lowerCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase , lowerCamelCase , 1 )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : str=-2 ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(lowerCamelCase )
visited.append(lowerCamelCase )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase ( self : List[str] , lowerCamelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
return len(self.graph[u] )
def lowerCamelCase ( self : str , lowerCamelCase : Optional[int]=-2 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return sorted_nodes
def lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return list(lowerCamelCase )
def lowerCamelCase ( self : Dict ) -> str:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return False
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : Any=-2 , lowerCamelCase : List[Any]=-1 ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = time()
self.dfs(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = time()
return end - begin
def lowerCamelCase ( self : str , lowerCamelCase : Optional[Any]=-2 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = time()
self.bfs(lowerCamelCase )
_UpperCAmelCase = time()
return end - begin
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
def lowerCamelCase ( self : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=1 ) -> Tuple:
"""simple docstring"""
# check if the u exists
if self.graph.get(lowerCamelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(lowerCamelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_UpperCAmelCase = [[w, u]]
def lowerCamelCase ( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : int ) -> Dict:
"""simple docstring"""
if self.graph.get(lowerCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase )
# the other way round
if self.graph.get(lowerCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=-2 , lowerCamelCase : int=-1 ) -> Optional[Any]:
"""simple docstring"""
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return visited
def lowerCamelCase ( self : List[Any] , lowerCamelCase : str=-1 ) -> List[str]:
"""simple docstring"""
if c == -1:
_UpperCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(lowerCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase , lowerCamelCase , 1 )
def lowerCamelCase ( self : Any , lowerCamelCase : List[Any]=-2 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(lowerCamelCase )
visited.append(lowerCamelCase )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase ( self : Any , lowerCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return len(self.graph[u] )
def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return list(lowerCamelCase )
def lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return False
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
return list(self.graph )
def lowerCamelCase ( self : str , lowerCamelCase : str=-2 , lowerCamelCase : Optional[int]=-1 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = time()
self.dfs(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = time()
return end - begin
def lowerCamelCase ( self : Any , lowerCamelCase : List[Any]=-2 ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = time()
self.bfs(lowerCamelCase )
_UpperCAmelCase = time()
return end - begin | 402 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = coefficient_matrix.shape
lowerCAmelCase__ , lowerCAmelCase__ = constant_matrix.shape
if rowsa != colsa:
lowerCAmelCase__ = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if colsa != 1:
lowerCAmelCase__ = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if rowsa != rowsa:
lowerCAmelCase__ = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != rowsa:
lowerCAmelCase__ = (
"""Number of initial values must be equal to number of rows in coefficient """
f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
)
raise ValueError(lowerCamelCase__ )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
lowerCAmelCase__ = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowerCAmelCase__ , lowerCAmelCase__ = table.shape
strictly_diagonally_dominant(lowerCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase__ ):
lowerCAmelCase__ = []
for row in range(lowerCamelCase__ ):
lowerCAmelCase__ = 0
for col in range(lowerCamelCase__ ):
if col == row:
lowerCAmelCase__ = table[row][col]
elif col == cols - 1:
lowerCAmelCase__ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCAmelCase__ = (temp + val) / denom
new_val.append(lowerCamelCase__ )
lowerCAmelCase__ = new_val
return [float(lowerCamelCase__ ) for i in new_val]
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = table.shape
lowerCAmelCase__ = True
for i in range(0 , lowerCamelCase__ ):
lowerCAmelCase__ = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644 | """simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : Optional[Any] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : str = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCAmelCase : int = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
def remove_articles(lowerCamelCase__ ):
lowerCAmelCase__ = re.compile(r"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(lowerCamelCase__ , """ """ , lowerCamelCase__ )
def white_space_fix(lowerCamelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCamelCase__ ):
lowerCAmelCase__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return int(normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = [any(compute_exact(lowerCamelCase__ , lowerCamelCase__ ) for ref in refs ) for pred, refs in zip(lowerCamelCase__ , lowerCamelCase__ )]
return (sum(lowerCamelCase__ ) / len(lowerCamelCase__ )) * 100
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = [rgram for rgrams in rgramslist for rgram in rgrams]
lowerCAmelCase__ = Counter(lowerCamelCase__ )
lowerCAmelCase__ = Counter(lowerCamelCase__ )
lowerCAmelCase__ = Counter()
for sgram, scount in sgramcounter.items():
lowerCAmelCase__ = scount * numref
lowerCAmelCase__ = Counter(lowerCamelCase__ )
lowerCAmelCase__ = Counter()
for cgram, ccount in cgramcounter.items():
lowerCAmelCase__ = ccount * numref
# KEEP
lowerCAmelCase__ = sgramcounter_rep & cgramcounter_rep
lowerCAmelCase__ = keepgramcounter_rep & rgramcounter
lowerCAmelCase__ = sgramcounter_rep & rgramcounter
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
if len(lowerCamelCase__ ) > 0:
lowerCAmelCase__ = keeptmpscorea / len(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
lowerCAmelCase__ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
lowerCAmelCase__ = 0
if keepscore_precision > 0 or keepscore_recall > 0:
lowerCAmelCase__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
lowerCAmelCase__ = sgramcounter_rep - cgramcounter_rep
lowerCAmelCase__ = delgramcounter_rep - rgramcounter
lowerCAmelCase__ = sgramcounter_rep - rgramcounter
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCAmelCase__ = 1
if len(lowerCamelCase__ ) > 0:
lowerCAmelCase__ = deltmpscorea / len(lowerCamelCase__ )
# ADDITION
lowerCAmelCase__ = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
lowerCAmelCase__ = set(lowerCamelCase__ ) & set(lowerCamelCase__ )
lowerCAmelCase__ = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
lowerCAmelCase__ = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
if len(lowerCamelCase__ ) > 0:
lowerCAmelCase__ = addtmpscore / len(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
lowerCAmelCase__ = addtmpscore / len(lowerCamelCase__ )
lowerCAmelCase__ = 0
if addscore_precision > 0 or addscore_recall > 0:
lowerCAmelCase__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = len(lowerCamelCase__ )
lowerCAmelCase__ = ssent.split(""" """ )
lowerCAmelCase__ = csent.split(""" """ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for rsent in rsents:
lowerCAmelCase__ = rsent.split(""" """ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
ragramslist.append(lowerCamelCase__ )
for i in range(0 , len(lowerCamelCase__ ) - 1 ):
if i < len(lowerCamelCase__ ) - 1:
lowerCAmelCase__ = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(lowerCamelCase__ )
if i < len(lowerCamelCase__ ) - 2:
lowerCAmelCase__ = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(lowerCamelCase__ )
if i < len(lowerCamelCase__ ) - 3:
lowerCAmelCase__ = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(lowerCamelCase__ )
ragramslist.append(lowerCamelCase__ )
ragramslist.append(lowerCamelCase__ )
ragramslist.append(lowerCamelCase__ )
for i in range(0 , len(lowerCamelCase__ ) - 1 ):
if i < len(lowerCamelCase__ ) - 1:
lowerCAmelCase__ = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(lowerCamelCase__ )
if i < len(lowerCamelCase__ ) - 2:
lowerCAmelCase__ = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(lowerCamelCase__ )
if i < len(lowerCamelCase__ ) - 3:
lowerCAmelCase__ = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(lowerCamelCase__ )
for i in range(0 , len(lowerCamelCase__ ) - 1 ):
if i < len(lowerCamelCase__ ) - 1:
lowerCAmelCase__ = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(lowerCamelCase__ )
if i < len(lowerCamelCase__ ) - 2:
lowerCAmelCase__ = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(lowerCamelCase__ )
if i < len(lowerCamelCase__ ) - 3:
lowerCAmelCase__ = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(lowerCamelCase__ )
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = SARIngram(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = SARIngram(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = SARIngram(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = SARIngram(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
lowerCAmelCase__ = sum([delascore, delascore, delascore, delascore] ) / 4
lowerCAmelCase__ = sum([addascore, addascore, addascore, addascore] ) / 4
lowerCAmelCase__ = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = "13a" , lowerCamelCase__ = True ):
"""simple docstring"""
if lowercase:
lowerCAmelCase__ = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
lowerCAmelCase__ = sacrebleu.metrics.bleu._get_tokenizer(lowerCamelCase__ )()(lowerCamelCase__ )
else:
lowerCAmelCase__ = sacrebleu.TOKENIZERS[tokenizer]()(lowerCamelCase__ )
elif tokenizer == "moses":
lowerCAmelCase__ = sacremoses.MosesTokenizer().tokenize(lowerCamelCase__ , return_str=lowerCamelCase__ , escape=lowerCamelCase__ )
elif tokenizer == "penn":
lowerCAmelCase__ = sacremoses.MosesTokenizer().penn_tokenize(lowerCamelCase__ , return_str=lowerCamelCase__ )
else:
lowerCAmelCase__ = sentence
if not return_str:
lowerCAmelCase__ = normalized_sent.split()
return normalized_sent
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if not (len(lowerCamelCase__ ) == len(lowerCamelCase__ ) == len(lowerCamelCase__ )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
lowerCAmelCase__ = 0
for src, pred, refs in zip(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
sari_score += SARIsent(normalize(lowerCamelCase__ ) , normalize(lowerCamelCase__ ) , [normalize(lowerCamelCase__ ) for sent in refs] )
lowerCAmelCase__ = sari_score / len(lowerCamelCase__ )
return 100 * sari_score
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="exp" , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , ):
"""simple docstring"""
lowerCAmelCase__ = len(references[0] )
if any(len(lowerCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowerCAmelCase__ = [[refs[i] for refs in references] for i in range(lowerCamelCase__ )]
lowerCAmelCase__ = sacrebleu.corpus_bleu(
lowerCamelCase__ , lowerCamelCase__ , smooth_method=lowerCamelCase__ , smooth_value=lowerCamelCase__ , force=lowerCamelCase__ , lowercase=lowerCamelCase__ , use_effective_order=lowerCamelCase__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = {}
result.update({"""sari""": compute_sari(sources=snake_case__ , predictions=snake_case__ , references=snake_case__ )} )
result.update({"""sacrebleu""": compute_sacrebleu(predictions=snake_case__ , references=snake_case__ )} )
result.update({"""exact""": compute_em(predictions=snake_case__ , references=snake_case__ )} )
return result
| 644 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase_ = logging.get_logger(__name__)
lowercase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
lowercase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
lowercase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
lowercase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
lowercase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
lowercase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
lowercase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
lowercase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
lowercase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
lowercase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
lowercase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
lowercase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
lowercase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
lowercase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : Any = FLAX_MODEL_MAPPING
lowercase_ = auto_class_update(FlaxAutoModel)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : str = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : int = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : int = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : str = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : List[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : List[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __A ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCamelCase : List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 352 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Returns True if `pattern` occurs in `text`, using a rolling polynomial hash so that
    each window of the text is compared against the pattern hash in O(1) expected time.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash for the next window
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 352 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Tuple:
random.seed(snake_case__ )
np.random.seed(snake_case__ )
torch.manual_seed(snake_case__ )
torch.cuda.manual_seed_all(snake_case__ )
# ^^ safe to call this function even if cuda is not available
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Iterable[torch.nn.Parameter] ,__A : float = 0.9999 ,__A : float = 0.0 ,__A : int = 0 ,__A : bool = False ,__A : Union[float, int] = 1.0 ,__A : Union[float, int] = 2 / 3 ,__A : Optional[Any] = None ,__A : Dict[str, Any] = None ,**__A : int ,) -> Tuple:
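        # Maintains an exponential moving average of the given parameters: after every
        # `step()` call, shadow <- decay * shadow + (1 - decay) * param.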
if isinstance(__A ,torch.nn.Module ):
_lowercase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' ,'1.0.0' ,__A ,standard_warn=__A ,)
_lowercase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowercase = True
if kwargs.get('max_value' ,__A ) is not None:
_lowercase = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' ,'1.0.0' ,__A ,standard_warn=__A )
_lowercase = kwargs['max_value']
if kwargs.get('min_value' ,__A ) is not None:
_lowercase = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' ,'1.0.0' ,__A ,standard_warn=__A )
_lowercase = kwargs['min_value']
_lowercase = list(__A )
_lowercase = [p.clone().detach() for p in parameters]
if kwargs.get('device' ,__A ) is not None:
_lowercase = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' ,'1.0.0' ,__A ,standard_warn=__A )
self.to(device=kwargs['device'] )
_lowercase = None
_lowercase = decay
_lowercase = min_decay
_lowercase = update_after_step
_lowercase = use_ema_warmup
_lowercase = inv_gamma
_lowercase = power
_lowercase = 0
_lowercase = None # set in `step()`
_lowercase = model_cls
_lowercase = model_config
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] ,__A : int ,__A : List[Any] ) -> "EMAModel":
_lowercase , _lowercase = model_cls.load_config(__A ,return_unused_kwargs=__A )
_lowercase = model_cls.from_pretrained(__A )
_lowercase = cls(model.parameters() ,model_cls=__A ,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def __UpperCAmelCase ( self : Optional[Any] ,__A : Optional[Any] ) -> Union[str, Any]:
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
_lowercase = self.model_cls.from_config(self.model_config )
_lowercase = self.state_dict()
state_dict.pop('shadow_params' ,__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def __UpperCAmelCase ( self : Optional[Any] ,__A : int ) -> float:
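        # Decay grows with the optimisation step: either the warmup curve
        # 1 - (1 + step / inv_gamma) ** -power or the classic (1 + step) / (10 + step),
        # clamped between min_decay and the configured maximum decay.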
_lowercase = max(0 ,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowercase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowercase = (1 + step) / (10 + step)
_lowercase = min(__A ,self.decay )
# make sure decay is not smaller than min_decay
_lowercase = max(__A ,self.min_decay )
return cur_decay_value
@torch.no_grad()
def __UpperCAmelCase ( self : Optional[int] ,__A : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
if isinstance(__A ,torch.nn.Module ):
_lowercase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' ,'1.0.0' ,__A ,standard_warn=__A ,)
_lowercase = parameters.parameters()
_lowercase = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowercase = self.get_decay(self.optimization_step )
_lowercase = decay
_lowercase = 1 - decay
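        # In-place EMA update below: s_param <- s_param - (1 - decay) * (s_param - param),
        # which is the same as s_param <- decay * s_param + (1 - decay) * param.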
_lowercase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params ,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowercase = deepspeed.zero.GatheredParameters(__A ,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def __UpperCAmelCase ( self : Any ,__A : Iterable[torch.nn.Parameter] ) -> None:
_lowercase = list(__A )
for s_param, param in zip(self.shadow_params ,__A ):
param.data.copy_(s_param.to(param.device ).data )
def __UpperCAmelCase ( self : Any ,__A : Optional[int]=None ,__A : Union[str, Any]=None ) -> None:
_lowercase = [
p.to(device=__A ,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def __UpperCAmelCase ( self : Optional[Any] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __UpperCAmelCase ( self : Dict ,__A : Iterable[torch.nn.Parameter] ) -> None:
_lowercase = [param.detach().cpu().clone() for param in parameters]
def __UpperCAmelCase ( self : int ,__A : Iterable[torch.nn.Parameter] ) -> None:
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params ,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowercase = None
def __UpperCAmelCase ( self : List[str] ,__A : dict ) -> None:
_lowercase = copy.deepcopy(__A )
_lowercase = state_dict.get('decay' ,self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
_lowercase = state_dict.get('min_decay' ,self.min_decay )
if not isinstance(self.min_decay ,__A ):
raise ValueError('Invalid min_decay' )
_lowercase = state_dict.get('optimization_step' ,self.optimization_step )
if not isinstance(self.optimization_step ,__A ):
raise ValueError('Invalid optimization_step' )
_lowercase = state_dict.get('update_after_step' ,self.update_after_step )
if not isinstance(self.update_after_step ,__A ):
raise ValueError('Invalid update_after_step' )
_lowercase = state_dict.get('use_ema_warmup' ,self.use_ema_warmup )
if not isinstance(self.use_ema_warmup ,__A ):
raise ValueError('Invalid use_ema_warmup' )
_lowercase = state_dict.get('inv_gamma' ,self.inv_gamma )
if not isinstance(self.inv_gamma ,(float, int) ):
raise ValueError('Invalid inv_gamma' )
_lowercase = state_dict.get('power' ,self.power )
if not isinstance(self.power ,(float, int) ):
raise ValueError('Invalid power' )
_lowercase = state_dict.get('shadow_params' ,__A )
if shadow_params is not None:
_lowercase = shadow_params
if not isinstance(self.shadow_params ,__A ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(__A ,torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' ) | 67 |
import requests
lowercase_ = """YOUR API KEY"""
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = giphy_api_key ) -> list:
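    # Query Giphy's public search endpoint and return the URL of every GIF found;
    # spaces in the query are joined with '+' as the API expects.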
lowercase__ = '+'.join(query.split() )
lowercase__ = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 235 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
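# Keys are fairseq parameter-name fragments, values the matching Hugging Face SEW module
# paths; "*" is a placeholder that gets replaced by the encoder layer index.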
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
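    # Walk `hf_pointer` attribute by attribute along the dotted `key`, check that the
    # tensor shapes agree, then copy `value` into the selected weight / bias slot.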
for attribute in key.split('.'):
UpperCamelCase__ : Optional[Any] = getattr(_UpperCamelCase , _UpperCamelCase)
if weight_type is not None:
UpperCamelCase__ : int = getattr(_UpperCamelCase , _UpperCamelCase).shape
else:
UpperCamelCase__ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Tuple = value
elif weight_type == "weight_g":
UpperCamelCase__ : Tuple = value
elif weight_type == "weight_v":
UpperCamelCase__ : Dict = value
elif weight_type == "bias":
UpperCamelCase__ : Optional[Any] = value
else:
UpperCamelCase__ : Tuple = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned) -> None:
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Dict = fairseq_model.state_dict()
UpperCamelCase__ : Optional[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : int = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Optional[Any] = True
if "*" in mapped_key:
UpperCamelCase__ : int = name.split(_UpperCamelCase)[0].split('.')[-2]
UpperCamelCase__ : Dict = mapped_key.replace('*' , _UpperCamelCase)
if "weight_g" in name:
UpperCamelCase__ : List[str] = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : List[Any] = 'weight_v'
elif "weight" in name:
UpperCamelCase__ : Dict = 'weight'
elif "bias" in name:
UpperCamelCase__ : Optional[Any] = 'bias'
else:
UpperCamelCase__ : Optional[int] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
continue
if not is_used:
unused_weights.append(_UpperCamelCase)
logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
UpperCamelCase__ : Optional[int] = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : Union[str, Any] = name.split('.')
UpperCamelCase__ : int = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : Optional[int] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(_UpperCamelCase)
def convert_config(model, is_finetuned) -> SEWConfig:
UpperCamelCase__ : List[Any] = SEWConfig()
if is_finetuned:
UpperCamelCase__ : Optional[int] = model.wav_encoder.wav_model.cfg
else:
UpperCamelCase__ : Any = model.cfg
UpperCamelCase__ : List[str] = fs_config.conv_bias
UpperCamelCase__ : Optional[int] = eval(fs_config.conv_feature_layers)
UpperCamelCase__ : Tuple = [x[0] for x in conv_layers]
UpperCamelCase__ : List[str] = [x[1] for x in conv_layers]
UpperCamelCase__ : str = [x[2] for x in conv_layers]
UpperCamelCase__ : Any = 'gelu'
UpperCamelCase__ : Optional[int] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
UpperCamelCase__ : Tuple = 0.0
UpperCamelCase__ : Dict = fs_config.activation_fn.name
UpperCamelCase__ : Tuple = fs_config.encoder_embed_dim
UpperCamelCase__ : Union[str, Any] = 0.02
UpperCamelCase__ : Any = fs_config.encoder_ffn_embed_dim
UpperCamelCase__ : List[str] = 1e-5
UpperCamelCase__ : int = fs_config.encoder_layerdrop
UpperCamelCase__ : List[Any] = fs_config.encoder_attention_heads
UpperCamelCase__ : Union[str, Any] = fs_config.conv_pos_groups
UpperCamelCase__ : Optional[Any] = fs_config.conv_pos
UpperCamelCase__ : str = len(_UpperCamelCase)
UpperCamelCase__ : List[Any] = fs_config.encoder_layers
UpperCamelCase__ : Optional[Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCamelCase__ : Optional[int] = model.cfg
UpperCamelCase__ : int = fs_config.final_dropout
UpperCamelCase__ : Dict = fs_config.layerdrop
UpperCamelCase__ : Optional[int] = fs_config.activation_dropout
UpperCamelCase__ : Optional[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCamelCase__ : Union[str, Any] = fs_config.attention_dropout
UpperCamelCase__ : Optional[int] = fs_config.dropout_input
UpperCamelCase__ : Dict = fs_config.dropout
UpperCamelCase__ : str = fs_config.mask_channel_length
UpperCamelCase__ : Dict = fs_config.mask_channel_prob
UpperCamelCase__ : Union[str, Any] = fs_config.mask_length
UpperCamelCase__ : Tuple = fs_config.mask_prob
UpperCamelCase__ : Dict = 'Wav2Vec2FeatureExtractor'
UpperCamelCase__ : List[str] = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True) -> None:
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
if config_path is not None:
UpperCamelCase__ : Union[str, Any] = SEWConfig.from_pretrained(_UpperCamelCase)
else:
UpperCamelCase__ : List[Any] = convert_config(model[0] , _UpperCamelCase)
UpperCamelCase__ : Dict = model[0].eval()
UpperCamelCase__ : List[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
if is_finetuned:
if dict_path:
UpperCamelCase__ : List[Any] = Dictionary.load(_UpperCamelCase)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : int = target_dict.pad_index
UpperCamelCase__ : Any = target_dict.bos_index
UpperCamelCase__ : Dict = target_dict.pad_index
UpperCamelCase__ : Tuple = target_dict.bos_index
UpperCamelCase__ : str = target_dict.eos_index
UpperCamelCase__ : Union[str, Any] = len(target_dict.symbols)
UpperCamelCase__ : Union[str, Any] = os.path.join(_UpperCamelCase , 'vocab.json')
if not os.path.isdir(_UpperCamelCase):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCamelCase))
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase)
with open(_UpperCamelCase , 'w' , encoding='utf-8') as vocab_handle:
json.dump(target_dict.indices , _UpperCamelCase)
UpperCamelCase__ : str = WavaVecaCTCTokenizer(
_UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCamelCase , )
UpperCamelCase__ : List[Any] = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase)
processor.save_pretrained(_UpperCamelCase)
UpperCamelCase__ : List[Any] = SEWForCTC(_UpperCamelCase)
else:
UpperCamelCase__ : List[str] = SEWModel(_UpperCamelCase)
feature_extractor.save_pretrained(_UpperCamelCase)
recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
hf_model.save_pretrained(_UpperCamelCase)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 720 |
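# Illustrative invocation of the conversion script above. The flags are the ones defined in
# the argparse block; the script file name and all paths are hypothetical placeholders.
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --config_path ./config.json \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned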
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
        UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
            UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , output_chunk)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
    UpperCamelCase__ : int = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , out)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
                UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
                UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
        UpperCamelCase__ : tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
# Otherwise, we can reuse the precomputed value
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 0 |
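# Self-contained sketch (an assumption-level illustration, not the original `chunk_layer`)
# of the chunking idea implemented above: flatten the leading batch-like dimensions of every
# input, run the layer one chunk at a time, and stitch the results back together.
import torch


def toy_layer(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return x + y  # stands in for an expensive module call


def naive_chunked_call(layer, inputs, chunk_size, no_batch_dims):
    # flatten the leading "batch-like" dims of every input
    flat = {k: v.reshape(-1, *v.shape[no_batch_dims:]) for k, v in inputs.items()}
    n = next(iter(flat.values())).shape[0]
    # run the layer chunk by chunk and concatenate the results
    pieces = [layer(**{k: v[i : i + chunk_size] for k, v in flat.items()}) for i in range(0, n, chunk_size)]
    out = torch.cat(pieces, dim=0)
    batch_shape = next(iter(inputs.values())).shape[:no_batch_dims]
    return out.view(*batch_shape, *out.shape[1:])


a, b = torch.randn(8, 16, 32), torch.randn(8, 16, 32)
assert torch.allclose(naive_chunked_call(toy_layer, {"x": a, "y": b}, chunk_size=4, no_batch_dims=2), toy_layer(a, b))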
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
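# Illustrative note (assuming this is the transformers `pegasus_x` subpackage): with the
# `_LazyModule` wiring above, importing the package is cheap and the torch-backed modeling
# file is only loaded when one of the exported names is first accessed, e.g.:
#
#   from transformers.models.pegasus_x import PegasusXConfig                     # config only
#   from transformers.models.pegasus_x import PegasusXForConditionalGeneration   # triggers the modeling import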
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
__magic_name__ : int = MODEL_FOR_MASKED_LM_MAPPING
__magic_name__ : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_8_0_1_5,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_5_5_0_6,
'''token_str''': ''' accuser''',
},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
] , )
A_ = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
A_ = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a__ , a__ )
@slow
@require_torch
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(a__ )
@slow
@require_tf
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(a__ )
def lowerCAmelCase_ ( self , a__ ) -> Tuple:
'''simple docstring'''
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 6_1_0, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2_2_0_1,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 1_2_7_9_0,
'''token_str''': ''' Lyon''',
},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
A_ = None
A_ = None
self.run_pipeline_test(a__ , [] )
@require_tf
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
A_ = None
A_ = None
self.run_pipeline_test(a__ , [] )
def lowerCAmelCase_ ( self , a__ , a__ , a__ ) -> Any:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = [
F"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def lowerCAmelCase_ ( self , a__ , a__ ) -> Any:
'''simple docstring'''
A_ = fill_masker.tokenizer
A_ = fill_masker.model
A_ = fill_masker(
F"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = fill_masker([F"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = fill_masker([F"This is a {tokenizer.mask_token}", F"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a__ , [
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
] , )
with self.assertRaises(a__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a__ ):
fill_masker('''This is''' )
self.run_test_top_k(a__ , a__ )
self.run_test_targets(a__ , a__ )
self.run_test_top_k_targets(a__ , a__ )
self.fill_mask_with_duplicate_targets_and_top_k(a__ , a__ )
self.fill_mask_with_multiple_masks(a__ , a__ )
def lowerCAmelCase_ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = sorted(vocab.keys() )[:2]
# Pipeline argument
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ , targets=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , a__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(a__ ) )
# Call argument
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , a__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(a__ ) )
# Score equivalence
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
A_ = [top_mask['''token_str'''] for top_mask in outputs]
A_ = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ) == set(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
A_ = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
# Raises with invalid
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[''''''] )
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets='''''' )
def lowerCAmelCase_ ( self , a__ , a__ ) -> int:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ , top_k=2 )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def lowerCAmelCase_ ( self , a__ , a__ ) -> str:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
# top_k=2, ntargets=3
A_ = sorted(vocab.keys() )[:3]
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 , targets=a__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        A_ = [el['''token_str'''] for el in sorted(a__ , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ).issubset(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=3 , targets=a__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = tokenizer.get_vocab()
# String duplicates + id duplicates
A_ = sorted(vocab.keys() )[:3]
A_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
A_ = fill_masker(F"My name is {tokenizer.mask_token}" , targets=a__ , top_k=1_0 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a__ ) , 3 )
def lowerCAmelCase_ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(
F"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a__ , [
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
            ] , )
 | 141 | 0 |
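# Minimal stand-alone usage of the fill-mask pipeline exercised by the tests above. The model
# name and the `targets` / `top_k` arguments are taken from the tests themselves; running this
# downloads the checkpoint, so treat it as an illustrative sketch rather than an offline test.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
print(unmasker("My name is <mask>"))
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3))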
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = (KDPMaDiscreteScheduler,)
_a = 10
def __A ( self , **A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A )
return config
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=A , beta_end=A )
def __A ( self ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
__magic_name__ = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
def __A ( self ) -> Any:
'''simple docstring'''
if torch_device == "mps":
return
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps , device=A )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(A , A )
__magic_name__ = model(A , A )
__magic_name__ = scheduler.step(A , A , A )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(A ) )
__magic_name__ = torch.mean(torch.abs(A ) )
if str(A ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
 | 716 |
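# Sketch of the denoising-loop pattern the tests above exercise. `model` and `initial_noise`
# are hypothetical stand-ins; in upstream diffusers the scheduler used here is
# `KDPM2DiscreteScheduler` (spelled `KDPMaDiscreteScheduler` in this file).
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample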
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
 | 678 | 0 |
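# Quick numeric check of the two implementations above: both evaluate to sqrt(27),
# i.e. approximately 5.196152422706632.
#
#   euclidean_distance([1, 2, 3], [4, 5, 6])        # -> 5.196152422706632
#   euclidean_distance_no_np([1, 2, 3], [4, 5, 6])  # -> 5.196152422706632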
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "sentencepiece.bpe.model"}
__A = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__A = {
"facebook/nllb-200-distilled-600M": 10_24,
}
# fmt: off
__A = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = ['input_ids', 'attention_mask']
lowerCamelCase : List[int] = []
lowerCamelCase : List[int] = []
def __init__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any="<s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[int]=False , **__SCREAMING_SNAKE_CASE : Any , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase =legacy_behaviour
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase ={"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase =1
__UpperCAmelCase =len(self.sp_model )
__UpperCAmelCase ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
__UpperCAmelCase ={v: k for k, v in self.lang_code_to_id.items()}
__UpperCAmelCase =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCAmelCase =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__UpperCAmelCase =src_lang if src_lang is not None else """eng_Latn"""
__UpperCAmelCase =self.lang_code_to_id[self._src_lang]
__UpperCAmelCase =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
__UpperCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> Tuple:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a ( self : Any ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self : str ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> None:
__UpperCAmelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[1] * len(self.prefix_tokens )
__UpperCAmelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] , __SCREAMING_SNAKE_CASE : Optional[str] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__UpperCAmelCase =src_lang
__UpperCAmelCase =self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tgt_lang_id
return inputs
def _a ( self : Any ) -> List[Any]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase =self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> int:
__UpperCAmelCase ="""""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def _a ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str = "eng_Latn" , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "fra_Latn" , **__SCREAMING_SNAKE_CASE : Dict , ) -> BatchEncoding:
__UpperCAmelCase =src_lang
__UpperCAmelCase =tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : Any ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> None:
__UpperCAmelCase =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__UpperCAmelCase =[]
__UpperCAmelCase =[self.eos_token_id, self.cur_lang_code]
else:
__UpperCAmelCase =[self.cur_lang_code]
__UpperCAmelCase =[self.eos_token_id]
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str ) -> None:
__UpperCAmelCase =self.lang_code_to_id[lang]
if self.legacy_behaviour:
__UpperCAmelCase =[]
__UpperCAmelCase =[self.eos_token_id, self.cur_lang_code]
else:
__UpperCAmelCase =[self.cur_lang_code]
__UpperCAmelCase =[self.eos_token_id]
| 68 |
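# Illustrative usage of the language-code handling implemented above (published in transformers
# as `NllbTokenizer`; downloading the checkpoint is required, so this is a sketch, not a test):
#
#   from transformers import NllbTokenizer
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   forced_bos_id = tok.lang_code_to_id["fra_Latn"]   # target language for generation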
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCamelCase = 256
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ =["""melgan"""]
def __init__( self : Dict , _a : SpectrogramNotesEncoder , _a : SpectrogramContEncoder , _a : TaFilmDecoder , _a : DDPMScheduler , _a : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
__lowerCamelCase : Any = math.log(1e-5 ) # Matches MelGAN training.
__lowerCamelCase : List[Any] = 4.0 # Largest value for most examples
__lowerCamelCase : Tuple = 128
self.register_modules(
notes_encoder=_a , continuous_encoder=_a , decoder=_a , scheduler=_a , melgan=_a , )
def _lowercase ( self : Tuple , _a : int , _a : List[Any]=(-1.0, 1.0) , _a : Any=False ) -> Dict:
__lowerCamelCase ,__lowerCamelCase : Any = output_range
if clip:
__lowerCamelCase : List[Any] = torch.clip(_a , self.min_value , self.max_value )
# Scale to [0, 1].
__lowerCamelCase : Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _lowercase ( self : Dict , _a : List[str] , _a : int=(-1.0, 1.0) , _a : Dict=False ) -> List[str]:
__lowerCamelCase ,__lowerCamelCase : List[Any] = input_range
__lowerCamelCase : Optional[Any] = torch.clip(_a , _a , _a ) if clip else outputs
# Scale to [0, 1].
__lowerCamelCase : str = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _lowercase ( self : int , _a : Dict , _a : List[str] , _a : Tuple ) -> Any:
__lowerCamelCase : Tuple = input_tokens > 0
__lowerCamelCase ,__lowerCamelCase : int = self.notes_encoder(
encoder_input_tokens=_a , encoder_inputs_mask=_a )
__lowerCamelCase ,__lowerCamelCase : Tuple = self.continuous_encoder(
encoder_inputs=_a , encoder_inputs_mask=_a )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _lowercase ( self : Tuple , _a : Tuple , _a : List[Any] , _a : int ) -> Dict:
__lowerCamelCase : Any = noise_time
if not torch.is_tensor(_a ):
__lowerCamelCase : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
__lowerCamelCase : List[str] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase : Tuple = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase : int = self.decoder(
encodings_and_masks=_a , decoder_input_tokens=_a , decoder_noise_time=_a )
return logits
@torch.no_grad()
def __call__( self : Optional[int] , _a : List[List[int]] , _a : Optional[torch.Generator] = None , _a : int = 100 , _a : bool = True , _a : str = "numpy" , _a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _a : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_a )}.' )
__lowerCamelCase : Optional[int] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__lowerCamelCase : Dict = np.zeros([1, 0, self.n_dims] , np.floataa )
__lowerCamelCase : List[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_a , device=self.device )
for i, encoder_input_tokens in enumerate(_a ):
if i == 0:
__lowerCamelCase : List[str] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__lowerCamelCase : List[Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_a , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__lowerCamelCase : int = ones
__lowerCamelCase : int = self.scale_features(
_a , output_range=[-1.0, 1.0] , clip=_a )
__lowerCamelCase : Tuple = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_a , continuous_mask=_a , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__lowerCamelCase : Optional[int] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_a , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_a )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__lowerCamelCase : List[Any] = self.decode(
encodings_and_masks=_a , input_tokens=_a , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__lowerCamelCase : Optional[int] = self.scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__lowerCamelCase : List[Any] = self.scale_to_features(_a , input_range=[-1.0, 1.0] )
__lowerCamelCase : Union[str, Any] = mel[:1]
__lowerCamelCase : Union[str, Any] = mel.cpu().float().numpy()
__lowerCamelCase : Dict = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a )
logger.info('Generated segment' , _a )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__lowerCamelCase : Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__lowerCamelCase : List[str] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_a )
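
# --- Illustrative sketch (editor-added example; separate from the pipeline above) ---
# scale_features / scale_to_features are plain min-max rescaling between the MelGAN log-amplitude
# range [min_value, max_value] and the model's working range (here [-1, 1]). The constants below
# mirror the ones set in __init__ and are assumptions for the example, not values read from a
# checkpoint.
import math

import torch

_SKETCH_MIN_VALUE = math.log(1e-5)  # MelGAN training floor, as above
_SKETCH_MAX_VALUE = 4.0


def _scale_features(features, min_out=-1.0, max_out=1.0):
    zero_one = (features - _SKETCH_MIN_VALUE) / (_SKETCH_MAX_VALUE - _SKETCH_MIN_VALUE)
    return zero_one * (max_out - min_out) + min_out


def _unscale_features(outputs, min_out=-1.0, max_out=1.0):
    zero_one = (outputs - min_out) / (max_out - min_out)
    return zero_one * (_SKETCH_MAX_VALUE - _SKETCH_MIN_VALUE) + _SKETCH_MIN_VALUE


def _check_round_trip():
    # Rescaling to [-1, 1] and back recovers the original features (up to float rounding).
    x = torch.tensor([_SKETCH_MIN_VALUE, 1.0, _SKETCH_MAX_VALUE])
    assert torch.allclose(_unscale_features(_scale_features(x)), x, atol=1e-5)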
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a__ ( _lowercase ):
__magic_name__ : Any = '''dpt'''
def __init__(self : Union[str, Any], __UpperCAmelCase : Union[str, Any]=768, __UpperCAmelCase : Tuple=12, __UpperCAmelCase : Tuple=12, __UpperCAmelCase : str=3072, __UpperCAmelCase : Optional[int]="gelu", __UpperCAmelCase : List[Any]=0.0, __UpperCAmelCase : Dict=0.0, __UpperCAmelCase : Dict=0.02, __UpperCAmelCase : Any=1e-12, __UpperCAmelCase : Optional[int]=384, __UpperCAmelCase : Optional[Any]=16, __UpperCAmelCase : Dict=3, __UpperCAmelCase : Optional[Any]=False, __UpperCAmelCase : Dict=True, __UpperCAmelCase : Any=[2, 5, 8, 11], __UpperCAmelCase : Optional[Any]="project", __UpperCAmelCase : Tuple=[4, 2, 1, 0.5], __UpperCAmelCase : List[Any]=[96, 192, 384, 768], __UpperCAmelCase : Optional[Any]=256, __UpperCAmelCase : Any=-1, __UpperCAmelCase : Optional[int]=False, __UpperCAmelCase : Optional[Any]=True, __UpperCAmelCase : Dict=0.4, __UpperCAmelCase : Dict=255, __UpperCAmelCase : List[str]=0.1, __UpperCAmelCase : Optional[Any]=[1, 1024, 24, 24], __UpperCAmelCase : str=[0, 1], __UpperCAmelCase : List[str]=None, **__UpperCAmelCase : Dict, ) -> Tuple:
"""simple docstring"""
super().__init__(**A_ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
SCREAMING_SNAKE_CASE : List[str] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
SCREAMING_SNAKE_CASE : List[str] = BitConfig(**A_ )
elif isinstance(A_, A_ ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
SCREAMING_SNAKE_CASE : str = BitConfig(**A_ )
elif isinstance(A_, A_ ):
SCREAMING_SNAKE_CASE : int = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
SCREAMING_SNAKE_CASE : Any = backbone_featmap_shape
SCREAMING_SNAKE_CASE : str = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Any = patch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : Dict = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = readout_type
SCREAMING_SNAKE_CASE : Tuple = reassemble_factors
SCREAMING_SNAKE_CASE : Optional[int] = neck_hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = fusion_hidden_size
SCREAMING_SNAKE_CASE : int = head_in_index
SCREAMING_SNAKE_CASE : str = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE : Any = use_auxiliary_head
SCREAMING_SNAKE_CASE : Dict = auxiliary_loss_weight
SCREAMING_SNAKE_CASE : str = semantic_loss_ignore_index
SCREAMING_SNAKE_CASE : List[Any] = semantic_classifier_dropout
def lowercase__ (self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE : List[str] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
return output
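
# --- Illustrative sketch (editor-added example; separate from the config class above) ---
# In hybrid mode, the constructor above resolves `backbone_config` from three possible inputs:
# None (fall back to the default BiT layout), a plain dict (deserialize into BitConfig), or an
# already-built config object. The same resolution logic as a standalone helper, assuming the
# installed transformers version exports BitConfig, as the import at the top of this file implies:
from transformers import BitConfig as _BitConfig
from transformers import PretrainedConfig as _PretrainedConfig


def _resolve_backbone_config(backbone_config):
    if backbone_config is None:
        # Default BiT backbone used by DPT-hybrid.
        return _BitConfig(
            global_padding="same",
            layer_type="bottleneck",
            depths=[3, 4, 9],
            out_features=["stage1", "stage2", "stage3"],
            embedding_dynamic_padding=True,
        )
    if isinstance(backbone_config, dict):
        return _BitConfig(**backbone_config)
    if isinstance(backbone_config, _PretrainedConfig):
        return backbone_config
    raise ValueError(
        f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
    )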
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Optional[Any], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : int, **__UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : List[str], *__UpperCAmelCase : str, **__UpperCAmelCase : List[Any] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[str] = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Union[str, Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : int, **__UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : str, *__UpperCAmelCase : str, **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[str] = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : List[str], **__UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Tuple, **__UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : str, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : int, **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Any, *__UpperCAmelCase : str, **__UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Any, **__UpperCAmelCase : str ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Dict, **__UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : List[str], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Tuple, **__UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : str ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Any, **__UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
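
# --- Illustrative sketch (editor-added example; separate from the dummy classes above) ---
# Every class above follows the same pattern: it stands in for a sentencepiece-backed tokenizer
# and raises with an install hint as soon as it is instantiated, via `requires_backends`. A
# minimal re-implementation of the idea (names here are illustrative, not the transformers
# internals):
import importlib.util


def _require_backend(obj, backend: str) -> None:
    if importlib.util.find_spec(backend) is None:
        raise ImportError(
            f"{type(obj).__name__} requires the `{backend}` library, but it was not found in your "
            f"environment. You can install it with `pip install {backend}`."
        )


class _SentencePieceTokenizerStub:
    def __init__(self, *args, **kwargs):
        _require_backend(self, "sentencepiece")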
"""simple docstring"""
from manim import *
class UpperCAmelCase_ ( snake_case ):
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Tuple = Rectangle(height=0.5 , width=0.5 )
__lowercase : Tuple = Rectangle(height=0.2_5 , width=0.2_5 )
__lowercase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__lowercase : Optional[Any] = [mem.copy() for i in range(6 )]
__lowercase : Optional[int] = [mem.copy() for i in range(6 )]
__lowercase : Dict = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : str = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : List[str] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : Any = Text('''CPU''' , font_size=24 )
__lowercase : Tuple = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase_ )
__lowercase : Optional[Any] = [mem.copy() for i in range(4 )]
__lowercase : List[Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : Optional[Any] = Text('''GPU''' , font_size=24 )
__lowercase : Dict = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase_ )
__lowercase : Any = [mem.copy() for i in range(6 )]
__lowercase : Dict = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : Dict = Text('''Model''' , font_size=24 )
__lowercase : List[str] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase_ )
__lowercase : int = []
__lowercase : Dict = []
__lowercase : Optional[Any] = []
for i, rect in enumerate(UpperCamelCase_ ):
rect.set_stroke(UpperCamelCase_ )
__lowercase : List[str] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCamelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase_ , buff=0.0 )
self.add(UpperCamelCase_ )
model_cpu_arr.append(UpperCamelCase_ )
self.add(*UpperCamelCase_ , *UpperCamelCase_ , *UpperCamelCase_ )
__lowercase : Optional[int] = [mem.copy() for i in range(6 )]
__lowercase : List[str] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : List[str] = Text('''Loaded Checkpoint''' , font_size=24 )
__lowercase : Dict = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCamelCase_ )
__lowercase : List[Any] = []
__lowercase : str = []
for i, rect in enumerate(UpperCamelCase_ ):
__lowercase : Dict = fill.copy().set_fill(UpperCamelCase_ , opacity=0.7 )
target.move_to(UpperCamelCase_ )
ckpt_arr.append(UpperCamelCase_ )
__lowercase : Dict = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCamelCase_ )
self.add(*UpperCamelCase_ , *UpperCamelCase_ )
__lowercase : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase : List[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Any = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase_ )
__lowercase : int = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowercase : int = [meta_mem.copy() for i in range(6 )]
__lowercase : Optional[int] = [meta_mem.copy() for i in range(6 )]
__lowercase : int = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : Any = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : List[str] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__lowercase : Tuple = Text('''Disk''' , font_size=24 )
__lowercase : Tuple = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(UpperCamelCase_ , run_time=3 ) , Write(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) )
__lowercase : Tuple = []
for i, rect in enumerate(UpperCamelCase_ ):
__lowercase : Any = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCamelCase_ , run_time=1.5 ) )
self.play(*UpperCamelCase_ )
self.play(FadeOut(UpperCamelCase_ ) )
__lowercase : List[Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ , run_time=3 ) )
self.play(
FadeOut(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , *UpperCamelCase_ ) , )
self.wait()
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 1_6
A : str = 3_2
def UpperCamelCase ( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
__magic_name__ , padding="""longest""" , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Union[str, Any] = mocked_dataloaders # noqa: F811
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __magic_name__ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__magic_name__ )
lowercase__ , lowercase__ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase__ = model(**__magic_name__ )
lowercase__ = output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**__magic_name__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__magic_name__ , default=__magic_name__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__magic_name__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
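
# --- Illustrative sketch (editor-added example; separate from the script above) ---
# Conceptually, `with accelerator.accumulate(model):` makes the optimizer step only once every
# `gradient_accumulation_steps` micro-batches (and skips gradient synchronisation in between on
# multi-GPU). A plain-PyTorch equivalent of that inner loop, with model/optimizer/dataloader as
# placeholders rather than the objects built above:
def _train_one_epoch_with_accumulation(model, optimizer, dataloader, gradient_accumulation_steps=4):
    model.train()
    optimizer.zero_grad()
    for step, batch in enumerate(dataloader):
        # Scale the loss so the accumulated gradient is an average over the micro-batches.
        loss = model(**batch).loss / gradient_accumulation_steps
        loss.backward()
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()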
"""Minimal single-neuron forward-propagation example."""
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    # When deriv is True, `value` is assumed to already be a sigmoid output s,
    # and s * (1 - s) is the derivative of the sigmoid at the original input.
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
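
# --- Illustrative check (editor-added example) ---
# sigmoid_function(layer_1, deriv=True) relies on the identity sigmoid'(x) = s * (1 - s) with
# s = sigmoid(x): since layer_1 is already a sigmoid output, feeding it back in yields the
# derivative directly. A quick numerical confirmation against a central finite difference:
def _check_sigmoid_derivative(x: float = 0.7, eps: float = 1e-6) -> None:
    s = sigmoid_function(x)
    analytic = sigmoid_function(s, deriv=True)
    numeric = (sigmoid_function(x + eps) - sigmoid_function(x - eps)) / (2 * eps)
    assert abs(analytic - numeric) < 1e-6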
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_UpperCAmelCase : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowerCAmelCase = '''resnet101'''
if "dc5" in model_name:
__lowerCAmelCase = True
__lowerCAmelCase = '''panoptic''' in model_name
if is_panoptic:
__lowerCAmelCase = 2_5_0
else:
__lowerCAmelCase = 9_1
__lowerCAmelCase = '''huggingface/label-files'''
__lowerCAmelCase = '''coco-detection-id2label.json'''
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type='''dataset'''), '''r'''))
__lowerCAmelCase = {int(lowerCamelCase): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
# load image processor
__lowerCAmelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowerCAmelCase = ConditionalDetrImageProcessor(format=lowerCamelCase)
# prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCamelCase, return_tensors='''pt''')
__lowerCAmelCase = encoding['''pixel_values''']
logger.info(F"""Converting model {model_name}...""")
# load original model from torch hub
__lowerCAmelCase = torch.hub.load('''DeppMeng/ConditionalDETR''', lowerCamelCase, pretrained=lowerCamelCase).eval()
__lowerCAmelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowerCAmelCase = '''conditional_detr.''' + src
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = rename_backbone_keys(lowerCamelCase)
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase, is_panoptic=lowerCamelCase)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCAmelCase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''')
and not key.startswith('''class_labels_classifier''')
and not key.startswith('''bbox_predictor''')
):
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
continue
else:
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
else:
if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
__lowerCAmelCase = state_dict.pop(lowerCamelCase)
__lowerCAmelCase = val
# finally, create HuggingFace model and load state dict
__lowerCAmelCase = ConditionalDetrForSegmentation(lowerCamelCase) if is_panoptic else ConditionalDetrForObjectDetection(lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
model.push_to_hub(repo_id=lowerCamelCase, organization='''DepuMeng''', commit_message='''Add model''')
# verify our conversion
__lowerCAmelCase = conditional_detr(lowerCamelCase)
__lowerCAmelCase = model(lowerCamelCase)
assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1E-4)
assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1E-4)
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1E-4)
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
image_processor.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_UpperCAmelCase : Any = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
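
# --- Illustrative sketch (editor-added example; separate from the conversion script above) ---
# The conversion boils down to rewriting checkpoint keys: pop the tensor stored under the original
# name and reinsert it under the Hugging Face name. A tiny standalone demonstration of that
# pattern with a dummy tensor (the key names are just examples):
def _demo_rename():
    ckpt = {"transformer.encoder.layers.0.linear1.weight": torch.zeros(4, 2)}
    renames = [("transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight")]
    for src, dest in renames:
        ckpt[dest] = ckpt.pop(src)
    assert list(ckpt) == ["encoder.layers.0.fc1.weight"]
    return ckpt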
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase__ = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = TOKEN
HfFolder.save_token(lowercase_)
@classmethod
def __UpperCAmelCase ( cls : Tuple) -> List[str]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-model-flax")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org")
except HTTPError:
pass
def __UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
_UpperCamelCase = FlaxBertModel(lowercase_)
model.push_to_hub("test-model-flax" , use_auth_token=self._token)
_UpperCamelCase = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')
_UpperCamelCase = flatten_dict(unfreeze(model.params))
_UpperCamelCase = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
_UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=f'{key} not identical')
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_ , repo_id="test-model-flax" , push_to_hub=lowercase_ , use_auth_token=self._token)
_UpperCamelCase = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')
_UpperCamelCase = flatten_dict(unfreeze(model.params))
_UpperCamelCase = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
_UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=f'{key} not identical')
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
_UpperCamelCase = FlaxBertModel(lowercase_)
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token)
_UpperCamelCase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
_UpperCamelCase = flatten_dict(unfreeze(model.params))
_UpperCamelCase = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
_UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=f'{key} not identical')
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowercase_ , repo_id="valid_org/test-model-flax-org" , push_to_hub=lowercase_ , use_auth_token=self._token)
_UpperCamelCase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
_UpperCamelCase = flatten_dict(unfreeze(model.params))
_UpperCamelCase = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
_UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=f'{key} not identical')
def check_models_equal(model_1, model_2) -> bool:
    """Return True if the two Flax models have (numerically) identical parameters."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
_UpperCamelCase = FlaxBertModel(lowercase_)
_UpperCamelCase = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ , lowercase_))
with self.assertRaises(lowercase_):
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_)
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_)
self.assertTrue(check_models_equal(lowercase_ , lowercase_))
def __UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
_UpperCamelCase = FlaxBertModel(lowercase_)
_UpperCamelCase = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ , lowercase_) , max_shard_size="10KB")
with self.assertRaises(lowercase_):
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_)
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_)
self.assertTrue(check_models_equal(lowercase_ , lowercase_))
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = "bert"
_UpperCamelCase = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(lowercase_):
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_)
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_)
self.assertIsNotNone(lowercase_)
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = "bert"
_UpperCamelCase = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(lowercase_):
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_)
_UpperCamelCase = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_)
self.assertIsNotNone(lowercase_)
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase_ : UNetaDModel , lowercase_ : UNetaDModel , lowercase_ : DDPMScheduler , lowercase_ : Any , ) -> Any:
"""simple docstring"""
super().__init__()
_UpperCamelCase = value_function
_UpperCamelCase = unet
_UpperCamelCase = scheduler
_UpperCamelCase = env
_UpperCamelCase = env.get_dataset()
_UpperCamelCase = {}
for key in self.data.keys():
try:
_UpperCamelCase = self.data[key].mean()
except: # noqa: E722
pass
_UpperCamelCase = {}
for key in self.data.keys():
try:
_UpperCamelCase = self.data[key].std()
except: # noqa: E722
pass
_UpperCamelCase = env.observation_space.shape[0]
_UpperCamelCase = env.action_space.shape[0]
def __UpperCAmelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __UpperCAmelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if type(lowercase_) is dict:
return {k: self.to_torch(lowercase_) for k, v in x_in.items()}
elif torch.is_tensor(lowercase_):
return x_in.to(self.unet.device)
return torch.tensor(lowercase_ , device=self.unet.device)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
_UpperCamelCase = val.clone()
return x_in
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = x.shape[0]
_UpperCamelCase = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
_UpperCamelCase = torch.full((batch_size,) , lowercase_ , device=self.unet.device , dtype=torch.long)
for _ in range(lowercase_):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_UpperCamelCase = self.value_function(x.permute(0 , 2 , 1) , lowercase_).sample
_UpperCamelCase = torch.autograd.grad([y.sum()] , [x])[0]
_UpperCamelCase = self.scheduler._get_variance(lowercase_)
_UpperCamelCase = torch.exp(0.5 * posterior_variance)
_UpperCamelCase = model_std * grad
_UpperCamelCase = 0
_UpperCamelCase = x.detach()
_UpperCamelCase = x + scale * grad
_UpperCamelCase = self.reset_xa(lowercase_ , lowercase_ , self.action_dim)
_UpperCamelCase = self.unet(x.permute(0 , 2 , 1) , lowercase_).sample.permute(0 , 2 , 1)
# TODO: verify deprecation of this kwarg
_UpperCamelCase = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , predict_epsilon=lowercase_)["prev_sample"]
# apply conditions to the trajectory (set the initial state)
_UpperCamelCase = self.reset_xa(lowercase_ , lowercase_ , self.action_dim)
_UpperCamelCase = self.to_torch(lowercase_)
return x, y
def __call__( self : Optional[int] , lowercase_ : str , lowercase_ : int=64 , lowercase_ : Any=32 , lowercase_ : List[Any]=2 , lowercase_ : str=0.1) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.normalize(lowercase_ , "observations")
_UpperCamelCase = obs[None].repeat(lowercase_ , axis=0)
_UpperCamelCase = {0: self.to_torch(lowercase_)}
_UpperCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_UpperCamelCase = randn_tensor(lowercase_ , device=self.unet.device)
_UpperCamelCase = self.reset_xa(lowercase_ , lowercase_ , self.action_dim)
_UpperCamelCase = self.to_torch(lowercase_)
# run the diffusion process
_UpperCamelCase , _UpperCamelCase = self.run_diffusion(lowercase_ , lowercase_ , lowercase_ , lowercase_)
# sort output trajectories by value
_UpperCamelCase = y.argsort(0 , descending=lowercase_).squeeze()
_UpperCamelCase = x[sorted_idx]
_UpperCamelCase = sorted_values[:, :, : self.action_dim]
_UpperCamelCase = actions.detach().cpu().numpy()
_UpperCamelCase = self.de_normalize(lowercase_ , key="actions")
# select the action with the highest value
if y is not None:
_UpperCamelCase = 0
else:
# if we didn't run value guiding, select a random action
_UpperCamelCase = np.random.randint(0 , lowercase_)
_UpperCamelCase = denorm_actions[selected_index, 0]
return denorm_actions
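# --- Illustrative sketch (not part of the pipeline above) ---
# The pipeline normalizes observations before planning and de-normalizes the predicted
# actions afterwards (see the `x_in * self.stds[key] + self.means[key]` de-normalization
# near the top of this class). A minimal, self-contained numpy round trip with made-up
# per-key statistics (the real pipeline uses dataset statistics):
import numpy as np

_demo_means = {"observations": np.array([0.5, -1.0])}
_demo_stds = {"observations": np.array([2.0, 0.5])}

def _demo_normalize(x, key):
    # inverse of the de-normalization above: (x - mean) / std
    return (x - _demo_means[key]) / _demo_stds[key]

def _demo_de_normalize(x, key):
    return x * _demo_stds[key] + _demo_means[key]

_demo_obs = np.array([1.5, -1.25])
assert np.allclose(_demo_de_normalize(_demo_normalize(_demo_obs, "observations"), "observations"), _demo_obs)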
| 547 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__snake_case :Dict =re.compile(r'\b(a|an|the)\b', re.UNICODE)
__snake_case :Optional[int] =None
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
A = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=SCREAMING_SNAKE_CASE_ , default=1.0 , help='Predict \"\" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=SCREAMING_SNAKE_CASE_ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
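# Example invocation (the script filename is illustrative; the flags match the parser above):
#   python evaluate_squad_v2.py data.json pred.json -o eval.json -n na_prob.json -p out_images -v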
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
A = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
A = bool(qa['answers']['text'] )
return qid_to_has_ans
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> List[str]:
'''simple docstring'''
def remove_articles(lowerCAmelCase__ : Optional[int] ):
return ARTICLES_REGEX.sub(' ' , SCREAMING_SNAKE_CASE_ )
def white_space_fix(lowerCAmelCase__ : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ : Dict ):
A = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE_ ) ) ) )
def lowerCamelCase_ ( lowerCAmelCase__ : Any ) -> str:
'''simple docstring'''
if not s:
return []
return normalize_answer(SCREAMING_SNAKE_CASE_ ).split()
def lowerCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ) -> Tuple:
'''simple docstring'''
return int(normalize_answer(SCREAMING_SNAKE_CASE_ ) == normalize_answer(SCREAMING_SNAKE_CASE_ ) )
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
A = get_tokens(SCREAMING_SNAKE_CASE_ )
A = get_tokens(SCREAMING_SNAKE_CASE_ )
A = collections.Counter(SCREAMING_SNAKE_CASE_ ) & collections.Counter(SCREAMING_SNAKE_CASE_ )
A = sum(common.values() )
if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
A = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
A = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
A = (2 * precision * recall) / (precision + recall)
return fa
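# Worked example of the token-level F1 above (self-contained, same Counter-intersection logic):
#   gold tokens ["cat", "sat", "on", "mat"] vs. predicted tokens ["cat", "sat", "down"]
#   -> 2 common tokens, precision 2/3, recall 2/4, F1 = 2 * (2/3) * (1/2) / ((2/3) + (1/2)) = 4/7
_demo_gold = ["cat", "sat", "on", "mat"]
_demo_pred = ["cat", "sat", "down"]
_demo_common = collections.Counter(_demo_gold) & collections.Counter(_demo_pred)
_demo_num_same = sum(_demo_common.values())
_demo_precision = _demo_num_same / len(_demo_pred)
_demo_recall = _demo_num_same / len(_demo_gold)
assert abs((2 * _demo_precision * _demo_recall) / (_demo_precision + _demo_recall) - 4 / 7) < 1e-9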
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
A = {}
A = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
A = qa['id']
A = [t for t in qa['answers']['text'] if normalize_answer(SCREAMING_SNAKE_CASE_ )]
if not gold_answers:
                # For unanswerable questions, the only correct answer is the empty string
A = ['']
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
A = preds[qid]
# Take max over all gold answers
A = max(compute_exact(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for a in gold_answers )
A = max(compute_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for a in gold_answers )
return exact_scores, fa_scores
def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
A = {}
for qid, s in scores.items():
A = na_probs[qid] > na_prob_thresh
if pred_na:
A = float(not qid_to_has_ans[qid] )
else:
A = s
return new_scores
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]=None ) -> Tuple:
'''simple docstring'''
if not qid_list:
A = len(SCREAMING_SNAKE_CASE_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
A = len(SCREAMING_SNAKE_CASE_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for k in new_eval:
A = new_eval[k]
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
plt.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(SCREAMING_SNAKE_CASE_ )
plt.savefig(SCREAMING_SNAKE_CASE_ )
plt.clf()
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[Any]=None ) -> str:
'''simple docstring'''
A = sorted(SCREAMING_SNAKE_CASE_ , key=lambda lowerCAmelCase__ : na_probs[k] )
A = 0.0
A = 1.0
A = 0.0
A = [1.0]
A = [0.0]
A = 0.0
for i, qid in enumerate(SCREAMING_SNAKE_CASE_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
A = true_pos / float(i + 1 )
A = true_pos / float(SCREAMING_SNAKE_CASE_ )
if i == len(SCREAMING_SNAKE_CASE_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(SCREAMING_SNAKE_CASE_ )
recalls.append(SCREAMING_SNAKE_CASE_ )
if out_image:
plot_pr_curve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return {"ap": 100.0 * avg_prec}
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Any ) -> str:
'''simple docstring'''
if out_image_dir and not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
A = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
A = make_precision_recall_eval(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_image=os.path.join(SCREAMING_SNAKE_CASE_ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
A = make_precision_recall_eval(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_image=os.path.join(SCREAMING_SNAKE_CASE_ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
A = {k: float(SCREAMING_SNAKE_CASE_ ) for k, v in qid_to_has_ans.items()}
A = make_precision_recall_eval(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_image=os.path.join(SCREAMING_SNAKE_CASE_ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'pr_exact' )
merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'pr_f1' )
merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'pr_oracle' )
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
if not qid_list:
return
A = [na_probs[k] for k in qid_list]
A = np.ones_like(SCREAMING_SNAKE_CASE_ ) / float(len(SCREAMING_SNAKE_CASE_ ) )
plt.hist(SCREAMING_SNAKE_CASE_ , weights=SCREAMING_SNAKE_CASE_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(SCREAMING_SNAKE_CASE_ , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> str:
'''simple docstring'''
A = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
A = num_no_ans
A = cur_score
A = 0.0
A = sorted(SCREAMING_SNAKE_CASE_ , key=lambda lowerCAmelCase__ : na_probs[k] )
for i, qid in enumerate(SCREAMING_SNAKE_CASE_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
A = scores[qid]
else:
if preds[qid]:
A = -1
else:
A = 0
cur_score += diff
if cur_score > best_score:
A = cur_score
A = na_probs[qid]
return 100.0 * best_score / len(SCREAMING_SNAKE_CASE_ ), best_thresh
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
A , A = find_best_thresh(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A , A = find_best_thresh(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A = best_exact
A = exact_thresh
A = best_fa
A = fa_thresh
def lowerCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
with open(OPTS.data_file ) as f:
A = json.load(SCREAMING_SNAKE_CASE_ )
A = dataset_json['data']
with open(OPTS.pred_file ) as f:
A = json.load(SCREAMING_SNAKE_CASE_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
A = json.load(SCREAMING_SNAKE_CASE_ )
else:
A = {k: 0.0 for k in preds}
A = make_qid_to_has_ans(SCREAMING_SNAKE_CASE_ ) # maps qid to True/False
A = [k for k, v in qid_to_has_ans.items() if v]
A = [k for k, v in qid_to_has_ans.items() if not v]
A , A = get_raw_scores(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A = apply_no_ans_threshold(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.na_prob_thresh )
A = apply_no_ans_threshold(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.na_prob_thresh )
A = make_eval_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if has_ans_qids:
A = make_eval_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , qid_list=SCREAMING_SNAKE_CASE_ )
merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'HasAns' )
if no_ans_qids:
A = make_eval_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , qid_list=SCREAMING_SNAKE_CASE_ )
merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.out_image_dir )
histogram_na_prob(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
print(json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) )
if __name__ == "__main__":
__snake_case :Union[str, Any] =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main() | 715 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__snake_case :Tuple ='src/transformers'
__snake_case :Dict ='docs/source/en'
__snake_case :Dict ='.'
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Find the start prompt.
A = 0
while not lines[start_index].startswith(lowerCAmelCase__ ):
start_index += 1
start_index += 1
A = start_index
while not lines[end_index].startswith(lowerCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__snake_case :List[Any] ='Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
__snake_case :List[Any] =re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__snake_case :List[str] =re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__snake_case :Tuple =re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
__snake_case :int =direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
A = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCAmelCase__ )
return [m.group(0 ) for m in matches]
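# For example, the regex above splits "TFBertModel" into ["TF", "Bert", "Model"] and
# "FlaxRegNetForImageClassification" into ["Flax", "Reg", "Net", "For", "Image", "Classification"].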
def lowerCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> Tuple:
'''simple docstring'''
A = 2 if text == '✅' or text == '❌' else len(lowerCAmelCase__ )
A = (width - text_length) // 2
A = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
A = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
A = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
A = collections.defaultdict(lowerCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCAmelCase__ ):
A = None
if attr_name.endswith('Tokenizer' ):
A = slow_tokenizers
A = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
A = fast_tokenizers
A = attr_name[:-13]
elif _re_tf_models.match(lowerCAmelCase__ ) is not None:
A = tf_models
A = _re_tf_models.match(lowerCAmelCase__ ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase__ ) is not None:
A = flax_models
A = _re_flax_models.match(lowerCAmelCase__ ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase__ ) is not None:
A = pt_models
A = _re_pt_models.match(lowerCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
A = True
break
# Try again after removing the last word in the name
A = ''.join(camel_case_split(lowerCAmelCase__ )[:-1] )
# Let's build that table!
A = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
A = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
A = [len(lowerCAmelCase__ ) + 2 for c in columns]
A = max([len(lowerCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
A = '|' + '|'.join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for c, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
A = {True: '✅', False: '❌'}
for name in model_names:
A = model_name_to_prefix[name]
A = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for l, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + "|\n"
return table
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple=False ) -> List[str]:
'''simple docstring'''
A , A , A , A = _find_text_in_file(
filename=os.path.join(lowerCAmelCase__ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
A = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCAmelCase__ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
__snake_case :List[Any] =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__snake_case :List[Any] =parser.parse_args()
check_model_table(args.fix_and_overwrite) | 224 | 0 |
"""simple docstring"""
class lowercase__ :
'''simple docstring'''
def __init__( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
def lowercase__ ( self : int ) -> None:
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_UpperCAmelCase , " -> " , " -> ".join([str(_UpperCAmelCase ) for j in self.vertex[i]] ) )
def lowercase__ ( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> None:
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_UpperCAmelCase )
else:
# else make a new vertex
UpperCAmelCase_ = [to_vertex]
def lowercase__ ( self : List[Any] ) -> None:
'''simple docstring'''
UpperCAmelCase_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : list ) -> None:
'''simple docstring'''
UpperCAmelCase_ = True
print(_UpperCAmelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
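# --- Illustrative sketch: the same traversal without recursion ---
# A minimal iterative DFS over a plain adjacency dict (independent of the class above),
# using an explicit stack; for the graph shown above it also visits the vertices as 0 1 2 3.
def _iterative_dfs(adjacency, start):
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so the first-listed neighbour is explored first
        stack.extend(reversed(adjacency.get(node, [])))
    return order

assert _iterative_dfs({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]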
| 82 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , ) -> int:
lowercase__ : int = size if size is not None else {"""height""": 18, """width""": 18}
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : Union[str, Any] = num_channels
lowercase__ : Tuple = image_size
lowercase__ : str = min_resolution
lowercase__ : Optional[Any] = max_resolution
lowercase__ : Union[str, Any] = do_resize
lowercase__ : Dict = size
lowercase__ : Optional[Any] = do_normalize
def UpperCAmelCase__( self ) -> Any:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a : List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : str = ImageGPTImageProcessingTester(self )
@property
def UpperCAmelCase__( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__( self ) -> int:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_normalize""" ) )
def UpperCAmelCase__( self ) -> Any:
lowercase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase__ : Optional[Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Optional[int] = os.path.join(lowerCamelCase__ , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase__ )
lowercase__ : str = self.image_processing_class.from_json_file(lowerCamelCase__ ).to_dict()
lowercase__ : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase__ )
lowercase__ : Union[str, Any] = self.image_processing_class.from_pretrained(lowerCamelCase__ ).to_dict()
lowercase__ : Dict = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCAmelCase__( self ) -> Dict:
pass
def _lowerCamelCase ( ):
lowercase__ : Tuple = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase__ : Optional[int] = Image.open(dataset[4]["""file"""] )
lowercase__ : Union[str, Any] = Image.open(dataset[5]["""file"""] )
lowercase__ : Optional[int] = [imagea, imagea]
return images
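# --- Illustrative sketch (an assumption about the idea, not the processor's exact code) ---
# ImageGPT-style preprocessing maps each normalized pixel to the id of its nearest colour
# cluster, which is why `image_processing(...)` below returns `input_ids` of shape
# (batch_size, height * width). A minimal nearest-cluster assignment in numpy:
def _pixels_to_cluster_ids(pixels, clusters):
    # pixels: (num_pixels, 3), clusters: (num_clusters, 3) -> (num_pixels,) integer ids
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(-1)

_toy_clusters = np.array([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.54]])
_toy_pixels = np.array([[1.0, 0.8, 0.5], [-0.5, 0.0, 0.5]])
assert _pixels_to_cluster_ids(_toy_pixels, _toy_clusters).tolist() == [0, 1]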
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__( self ) -> str:
lowercase__ : Optional[int] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase__ : int = prepare_images()
# test non-batched
lowercase__ : int = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowercase__ : Any = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase__ )
# test batched
lowercase__ : str = image_processing(lowerCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowercase__ : Optional[int] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase__ ) | 200 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def SCREAMING_SNAKE_CASE ( snake_case, snake_case=0.999, snake_case="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case):
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case):
return math.exp(t * -12.0)
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}")
__snake_case = []
for i in range(__UpperCAmelCase):
__snake_case = i / num_diffusion_timesteps
__snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase) / alpha_bar_fn(__UpperCAmelCase), __UpperCAmelCase))
return torch.tensor(__UpperCAmelCase, dtype=torch.floataa)
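# --- Illustrative sketch (independent of the scheduler class below) ---
# The scheduler below advances samples with Heun's method: a first-order (Euler) step
# followed by a correction that averages the derivative at the start and end of the
# interval. A minimal, self-contained version for a generic ODE dy/dt = f(t, y):
def _heun_step(f, t, y, dt):
    k1 = f(t, y)                     # derivative at the current point (first-order stage)
    y_euler = y + dt * k1            # provisional Euler estimate
    k2 = f(t + dt, y_euler)          # derivative at the provisional point
    return y + dt * 0.5 * (k1 + k2)  # average the two derivatives (second-order correction)

# For dy/dt = -y with y(0) = 1, one Heun step of size 0.1 gives 1 - 0.1 + 0.1**2 / 2 = 0.905.
assert abs(_heun_step(lambda t, y: -y, 0.0, 1.0, 0.1) - 0.905) < 1e-9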
class _A ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCamelCase_ : int = [e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase_ : str = 2
@register_to_config
def __init__( self : int , A_ : int = 1_000 , A_ : float = 0.0_00_85 , A_ : float = 0.0_12 , A_ : str = "linear" , A_ : Optional[Union[np.ndarray, List[float]]] = None , A_ : str = "epsilon" , A_ : Optional[bool] = False , A_ : Optional[bool] = False , A_ : float = 1.0 , A_ : str = "linspace" , A_ : int = 0 , ) -> Dict:
if trained_betas is not None:
__snake_case = torch.tensor(A_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__snake_case = torch.linspace(A_ , A_ , A_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(A_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
__snake_case = betas_for_alpha_bar(A_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
__snake_case = 1.0 - self.betas
__snake_case = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(A_ , A_ , A_ )
__snake_case = use_karras_sigmas
def lowercase ( self : Optional[int] , A_ : int , A_ : Dict=None ) -> Dict:
if schedule_timesteps is None:
__snake_case = self.timesteps
__snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__snake_case = 1 if len(A_ ) > 1 else 0
else:
__snake_case = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
__snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase ( self : Optional[int] ) -> str:
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase ( self : Optional[Any] , A_ : torch.FloatTensor , A_ : Union[float, torch.FloatTensor] , ) -> int:
__snake_case = self.index_for_timestep(A_ )
__snake_case = self.sigmas[step_index]
__snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase ( self : int , A_ : int , A_ : Union[str, torch.device] = None , A_ : Optional[int] = None , ) -> int:
__snake_case = num_inference_steps
__snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case = np.linspace(0 , num_train_timesteps - 1 , A_ , dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(A_ , 0 , -step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
__snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__snake_case = np.log(A_ )
__snake_case = np.interp(A_ , np.arange(0 , len(A_ ) ) , A_ )
if self.config.use_karras_sigmas:
__snake_case = self._convert_to_karras(in_sigmas=A_ , num_inference_steps=self.num_inference_steps )
__snake_case = np.array([self._sigma_to_t(A_ , A_ ) for sigma in sigmas] )
__snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__snake_case = torch.from_numpy(A_ ).to(device=A_ )
__snake_case = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__snake_case = torch.from_numpy(A_ )
__snake_case = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('''mps''' ):
# mps does not support float64
__snake_case = timesteps.to(A_ , dtype=torch.floataa )
else:
__snake_case = timesteps.to(device=A_ )
# empty dt and derivative
__snake_case = None
__snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case = defaultdict(A_ )
def lowercase ( self : List[str] , A_ : Optional[Any] , A_ : Dict ) -> Any:
__snake_case = np.log(A_ )
# get distribution
__snake_case = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__snake_case = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__snake_case = low_idx + 1
__snake_case = log_sigmas[low_idx]
__snake_case = log_sigmas[high_idx]
# interpolate sigmas
__snake_case = (low - log_sigma) / (low - high)
__snake_case = np.clip(A_ , 0 , 1 )
# transform interpolation to time range
__snake_case = (1 - w) * low_idx + w * high_idx
__snake_case = t.reshape(sigma.shape )
return t
def lowercase ( self : Optional[int] , A_ : torch.FloatTensor , A_ : int ) -> Any:
__snake_case = in_sigmas[-1].item()
__snake_case = in_sigmas[0].item()
__snake_case = 7.0 # 7.0 is the value used in the paper
__snake_case = np.linspace(0 , 1 , A_ )
__snake_case = sigma_min ** (1 / rho)
__snake_case = sigma_max ** (1 / rho)
__snake_case = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowercase ( self : Union[str, Any] ) -> int:
return self.dt is None
def lowercase ( self : Union[str, Any] , A_ : Union[torch.FloatTensor, np.ndarray] , A_ : Union[float, torch.FloatTensor] , A_ : Union[torch.FloatTensor, np.ndarray] , A_ : bool = True , ) -> Tuple:
__snake_case = self.index_for_timestep(A_ )
# advance index counter by 1
__snake_case = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
__snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__snake_case = self.sigmas[step_index - 1]
__snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case = 0
__snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__snake_case = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
__snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case = sigma_next - sigma_hat
# store for 2nd order step
__snake_case = derivative
__snake_case = dt
__snake_case = sample
else:
# 2. 2nd order / Heun's method
__snake_case = (sample - pred_original_sample) / sigma_next
__snake_case = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__snake_case = self.dt
__snake_case = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def lowercase ( self : Dict , A_ : torch.FloatTensor , A_ : torch.FloatTensor , A_ : torch.FloatTensor , ) -> List[str]:
__snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
__snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__snake_case = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__snake_case = self.timesteps.to(original_samples.device )
__snake_case = timesteps.to(original_samples.device )
__snake_case = [self.index_for_timestep(A_ , A_ ) for t in timesteps]
__snake_case = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__snake_case = sigma.unsqueeze(-1 )
__snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ) -> str:
        return self.config.num_train_timesteps
| 713 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowercase : Optional[Any] = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 93 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def _A ( SCREAMING_SNAKE_CASE__ : Dict ):
UpperCamelCase :List[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :List[str] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
UpperCamelCase :str = s_dict.pop(SCREAMING_SNAKE_CASE__ )
elif "subsample" in key:
UpperCamelCase :Union[str, Any] = s_dict.pop(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
UpperCamelCase , UpperCamelCase :List[Any] = emb.weight.shape
UpperCamelCase :Any = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[Any] = emb.weight.data
return lin_layer
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
UpperCamelCase :Any = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
UpperCamelCase :Dict = mam_aaa['''args''']
UpperCamelCase :Dict = mam_aaa['''model''']
UpperCamelCase :int = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
rename_keys(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = state_dict['''decoder.embed_tokens.weight'''].shape[0]
UpperCamelCase :Optional[Any] = args.share_decoder_input_output_embed
UpperCamelCase :Optional[int] = [int(SCREAMING_SNAKE_CASE__ ) for i in args.conv_kernel_sizes.split(''',''' )]
UpperCamelCase :Optional[Any] = SpeechaTextConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(SCREAMING_SNAKE_CASE__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=SCREAMING_SNAKE_CASE__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , num_beams=5 , max_length=200 , use_cache=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=2 , early_stopping=SCREAMING_SNAKE_CASE__ , )
UpperCamelCase :List[Any] = SpeechaTextForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase :str = model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0 and not set(SCREAMING_SNAKE_CASE__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
UpperCamelCase :List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCamelCase :Any = lm_head_weights
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
__snake_case = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
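    # Example invocation (the script filename and paths are placeholders; the flags match the parser above):
    #   python <this_script>.py --fairseq_path /path/to/fairseq_s2t_checkpoint.pt --pytorch_dump_folder_path ./s2t_converted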
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
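    # Expected output (n * (2 * n - 1) for n = 0 .. length - 1):
    #   [0, 1, 6, 15, 28]
    #   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]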
| 658 | 1 |
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _lowercase ( _UpperCAmelCase=None ) -> Optional[int]:
if subparsers is not None:
lowerCamelCase =subparsers.add_parser("""test""" )
else:
lowerCamelCase =argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=__SCREAMING_SNAKE_CASE , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
return parser
def _lowercase ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
lowerCamelCase =script_name
else:
lowerCamelCase =F"""--config_file={args.config_file} {script_name}"""
lowerCamelCase =["accelerate-launch"] + test_args.split()
lowerCamelCase =execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def _lowercase ( ) -> Any:
lowerCamelCase =test_command_parser()
lowerCamelCase =parser.parse_args()
test_command(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
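    # Example invocation (via the `accelerate` CLI, which exposes this as the `test` subcommand;
    # the config path is a placeholder):
    #   accelerate test --config_file path/to/default_config.yaml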
| 704 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
UpperCAmelCase__ : Optional[int] ={
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
UpperCAmelCase__ : str ={
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def _lowercase ( _UpperCAmelCase ) -> Tuple:
lowerCamelCase =list(state_dict.keys() )
for name in state_dict_keys:
lowerCamelCase =state_dict.pop(_UpperCAmelCase )
# emb -> embedding
if name.startswith("""emb.""" ):
lowerCamelCase =name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
lowerCamelCase =name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
lowerCamelCase =re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , _UpperCAmelCase )
# ffn -> feed_forward
lowerCamelCase =re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , _UpperCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
lowerCamelCase =name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
lowerCamelCase =name.replace(""".time_mix_v""" , """.time_mix_value""" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(""".time_mix_r""" ):
lowerCamelCase =name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
lowerCamelCase ="""rwkv.""" + name
lowerCamelCase =weight
return state_dict
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=None ) -> Tuple:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
lowerCamelCase =5_02_77
lowerCamelCase =AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
lowerCamelCase =PreTrainedTokenizerFast(tokenizer_file=_UpperCAmelCase )
lowerCamelCase =len(_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
# 2. Build the config
lowerCamelCase =list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowerCamelCase =candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
lowerCamelCase =RwkvConfig(
vocab_size=_UpperCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_UpperCAmelCase )
# 3. Download model file then convert state_dict
lowerCamelCase =hf_hub_download(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase =torch.load(_UpperCAmelCase , map_location="""cpu""" )
lowerCamelCase =convert_state_dict(_UpperCAmelCase )
# 4. Split in shards and save
lowerCamelCase , lowerCamelCase =shard_checkpoint(_UpperCAmelCase )
for shard_file, shard in shards.items():
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if index is not None:
lowerCamelCase =os.path.join(_UpperCAmelCase , _UpperCAmelCase )
# Save the index as well
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase =json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + """\n"""
f.write(_UpperCAmelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"""Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
lowerCamelCase =list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowerCamelCase =torch.load(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
lowerCamelCase =AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
model.push_to_hub(_UpperCAmelCase , max_shard_size="""2GB""" )
tokenizer.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
UpperCAmelCase__ : List[Any] =parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
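    # Example invocation (repo id, filenames and paths are placeholders; the flags match the parser above):
    #   python <this_script>.py --repo_id <hub_repo_id> --checkpoint_file <checkpoint.pth> \
    #       --output_dir ./rwkv_converted --size 169M --tokenizer_file <tokenizer.json>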
| 269 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
lowercase_ = TypeVar("""T""")
class __UpperCamelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Optional[int] , _A : T ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = data
__SCREAMING_SNAKE_CASE : Optional[Any] = self
__SCREAMING_SNAKE_CASE : Optional[int] = 0
class __UpperCamelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : dict[T, DisjointSetTreeNode[T]] = {}
def UpperCAmelCase__ ( self : Dict , _A : T ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = DisjointSetTreeNode(_A )
def UpperCAmelCase__ ( self : Optional[Any] , _A : T ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.map[data]
if elem_ref != elem_ref.parent:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase__ ( self : Optional[Any] , _A : DisjointSetTreeNode[T] , _A : DisjointSetTreeNode[T] ):
"""simple docstring"""
if nodea.rank > nodea.rank:
__SCREAMING_SNAKE_CASE : Union[str, Any] = nodea
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase__ ( self : str , _A : T , _A : T ):
"""simple docstring"""
self.link(self.find_set(_A ) , self.find_set(_A ) )
class __UpperCamelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : dict[T, dict[T, int]] = {}
def UpperCAmelCase__ ( self : int , _A : T ):
"""simple docstring"""
if node not in self.connections:
__SCREAMING_SNAKE_CASE : Tuple = {}
def UpperCAmelCase__ ( self : Optional[int] , _A : T , _A : T , _A : int ):
"""simple docstring"""
self.add_node(_A )
self.add_node(_A )
__SCREAMING_SNAKE_CASE : Tuple = weight
__SCREAMING_SNAKE_CASE : Union[str, Any] = weight
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : List[Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda _A : _A[2] )
# creating the disjoint set
__SCREAMING_SNAKE_CASE : Tuple = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_A )
# MST generation
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : List[str] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = edges[index]
index += 1
__SCREAMING_SNAKE_CASE : Dict = disjoint_set.find_set(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = disjoint_set.find_set(_A )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_A , _A , _A )
disjoint_set.union(_A , _A )
return graph
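# --- Illustrative sketch (independent of the classes above) ---
# The same Kruskal idea on a plain edge list: sort edges by weight, then keep an edge
# whenever its endpoints are still in different union-find components.
def _kruskal_mst(num_nodes, edges):
    # edges: list of (u, v, weight); returns the edges chosen for the MST
    parent = list(range(num_nodes))

    def _find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps the trees shallow
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = _find(u), _find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append((u, v, w))
    return mst

# A square plus one diagonal: the two heaviest edges are skipped, so the MST weight is 1 + 2 + 3 = 6.
assert sum(w for _, _, w in _kruskal_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4), (0, 2, 5)])) == 6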
| 74 |
from __future__ import annotations
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
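# Worked example: with electron_conc=25 and hole_conc=100 (intrinsic_conc left at 0), the
# mass-action relation n * p = n_i ** 2 gives n_i = (25 * 100) ** 0.5 = 50, so the function
# above returns ("intrinsic_conc", 50.0).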
if __name__ == "__main__":
import doctest
doctest.testmod() | 287 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> list[list[int]]:
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Optional[int] = sum(_UpperCAmelCase )
create_state_space_tree(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return result
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> None:
if sum(_UpperCAmelCase ) > max_sum or (remaining_nums_sum + sum(_UpperCAmelCase )) < max_sum:
return
if sum(_UpperCAmelCase ) == max_sum:
result.append(_UpperCAmelCase )
return
for index in range(_UpperCAmelCase , len(_UpperCAmelCase ) ):
create_state_space_tree(
_UpperCAmelCase , _UpperCAmelCase , index + 1 , [*path, nums[index]] , _UpperCAmelCase , remaining_nums_sum - nums[index] , )
_UpperCAmelCase : Union[str, Any] = [3, 34, 4, 12, 5, 2]
_UpperCAmelCase : str = 9
_UpperCAmelCase : Any = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
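# For the example above (nums = [3, 34, 4, 12, 5, 2], max_sum = 9) the qualifying subsets are
# [3, 4, 2] and [4, 5], so `print(*result)` shows: [3, 4, 2] [4, 5]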
| 707 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : str=32 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Dict=10 , UpperCAmelCase : List[str]=[10, 20, 30, 40] , UpperCAmelCase : Any=[1, 1, 2, 1] , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict="relu" , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Dict=None , ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : int = embeddings_size
lowerCamelCase__ : str = hidden_sizes
lowerCamelCase__ : Any = depths
lowerCamelCase__ : str = is_training
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Dict = num_labels
lowerCamelCase__ : Dict = scope
lowerCamelCase__ : List[str] = len(UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : str = self.get_config()
return config, pixel_values
def A_ ( self : Optional[int] ) -> Dict:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A_ ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> Optional[Any]:
lowerCamelCase__ : Optional[Any] = FlaxRegNetModel(config=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A_ ( self : str , UpperCAmelCase : int , UpperCAmelCase : Tuple ) -> Tuple:
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : str = FlaxRegNetForImageClassification(config=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Dict ) -> str:
lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
lowerCamelCase__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
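# Common Flax model tests for RegNet: config round-trips, forward-signature check,
# hidden-state counts, and jitted vs. non-jitted output consistency.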
@require_flax
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : List[Any] ) -> None:
lowerCamelCase__ : List[Any] = FlaxRegNetModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def A_ ( self : int ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : Any ) -> str:
return
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : Dict ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def A_ ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def A_ ( self : Any ) -> Tuple:
pass
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A_ ( self : int ) -> List[str]:
def check_hidden_states_output(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ):
lowerCamelCase__ : Any = model_class(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Dict ) -> int:
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ):
return model(pixel_values=UpperCAmelCase , **UpperCAmelCase )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ : Dict = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ : Optional[int] = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
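# Integration helpers below: load the COCO fixture image and run the pretrained
# facebook/regnet-y-040 checkpoint end to end.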
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A_ ( self : Any ) -> List[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def A_ ( self : Dict ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=UpperCAmelCase , return_tensors='np' )
lowerCamelCase__ : List[Any] = model(**UpperCAmelCase )
# verify the logits
lowerCamelCase__ : Union[str, Any] = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCamelCase__ : Tuple = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
| 188 | 0 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert an image to its color negative, pixel by pixel."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 60 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
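# Force deterministic kernels so the hard-coded image-slice expectations below stay reproducible.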
enable_full_determinism()
class a__ ( a_, a_, unittest.TestCase ):
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=_a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowercase : Optional[Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowercase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=32 , )
lowercase : str = CLIPTextModel(_a )
lowercase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowercase : Any = CLIPTextModelWithProjection(_a )
lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowercase : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __magic_name__ ( self , _a , _a=0 ):
lowercase : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
lowercase : List[Any] = image / 2 + 0.5
if str(_a ).startswith("mps" ):
lowercase : Optional[int] = torch.manual_seed(_a )
else:
lowercase : str = torch.Generator(device=_a ).manual_seed(_a )
lowercase : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.7_5,
}
return inputs
def __magic_name__ ( self ):
lowercase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] = self.get_dummy_components()
lowercase : str = StableDiffusionXLImgaImgPipeline(**_a )
lowercase : List[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowercase : Optional[Any] = self.get_dummy_inputs(_a )
lowercase : Tuple = sd_pipe(**_a ).images
lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Tuple = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __magic_name__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
lowercase : Optional[Any] = self.get_dummy_components()
lowercase : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_a )
lowercase : Any = sd_pipe.to(_a )
lowercase : Any = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
# forward without prompt embeds
lowercase : Union[str, Any] = self.get_dummy_inputs(_a )
lowercase : int = 3 * ["this is a negative prompt"]
lowercase : Any = negative_prompt
lowercase : List[str] = 3 * [inputs["prompt"]]
lowercase : Optional[int] = sd_pipe(**_a )
lowercase : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowercase : Any = self.get_dummy_inputs(_a )
lowercase : List[str] = 3 * ["this is a negative prompt"]
lowercase : Optional[Any] = 3 * [inputs.pop("prompt" )]
        lowercase , lowercase , lowercase , lowercase = sd_pipe.encode_prompt(_a , negative_prompt=_a )
lowercase : int = sd_pipe(
**_a , prompt_embeds=_a , negative_prompt_embeds=_a , pooled_prompt_embeds=_a , negative_pooled_prompt_embeds=_a , )
lowercase : Dict = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
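# Slow GPU integration test: runs the stabilityai/stable-diffusion-2-base checkpoint end to end
# and checks a slice of the generated image against hard-coded values.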
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ):
lowercase : Optional[int] = torch.Generator(device=_a ).manual_seed(_a )
lowercase : Any = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowercase : List[str] = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowercase : List[Any] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self ):
lowercase : List[Any] = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowercase : Any = self.get_inputs(_a )
lowercase : int = pipe(**_a ).images
lowercase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[Any] = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 361 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 718 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
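# (Illustration only, not part of the tool itself: the variation expansion is just a cartesian
# product, roughly
#
#     dims = [v.split("|") for v in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
#     variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
#
# which mirrors what main() below does with the --variations arguments.)
#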
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowercase_ = float("""nan""")
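# Tee mirrors everything written to stdout into a log file, stripping tqdm's carriage-return
# progress updates from the file copy.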
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = sys.stdout
__SCREAMING_SNAKE_CASE : int = open(_A , '''a''' )
def __getattr__( self : int , _A : str ):
"""simple docstring"""
return getattr(self.stdout , _A )
def UpperCAmelCase__ ( self : Dict , _A : Any ):
"""simple docstring"""
self.stdout.write(_A )
# strip tqdm codes
self.file.write(re.sub(r'''^.*\r''' , '''''' , _A , 0 , re.M ) )
def a__ ( snake_case=80 , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = []
# deal with critical env vars
__SCREAMING_SNAKE_CASE : List[Any] = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
__SCREAMING_SNAKE_CASE : Any = os.environ.get(snake_case , snake_case )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__SCREAMING_SNAKE_CASE : Optional[int] = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(snake_case )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__SCREAMING_SNAKE_CASE : Tuple = []
__SCREAMING_SNAKE_CASE : List[Any] = ''''''
while len(snake_case ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(snake_case ) == 0 or len(snake_case ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''
return "\\\n".join(snake_case )
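# Normalize the user-supplied base command: collapse line continuations, force our --output_dir
# and make sure --overwrite_output_dir is present.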
def a__ ( snake_case , snake_case ):
"""simple docstring"""
# unwrap multi-line input
__SCREAMING_SNAKE_CASE : Dict = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
    __SCREAMING_SNAKE_CASE : Any = re.sub(r'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
    __SCREAMING_SNAKE_CASE : Any = re.sub(r'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
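# Run a single benchmark invocation, save its stdout/stderr next to the output dir, and pull the
# requested metric keys from output_dir/all_results.json.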
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = subprocess.run(snake_case , capture_output=snake_case , text=snake_case )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
__SCREAMING_SNAKE_CASE : Optional[int] = variation.replace(''' ''' , '''-''' )
with open(Path(snake_case ) / F'''log.{prefix}.stdout.txt''' , '''w''' ) as f:
f.write(result.stdout )
with open(Path(snake_case ) / F'''log.{prefix}.stderr.txt''' , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f:
__SCREAMING_SNAKE_CASE : Any = json.load(snake_case )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : str = F'''{id}: {variation:<{longest_variation_len}}'''
__SCREAMING_SNAKE_CASE : Optional[int] = F'''{preamble}: '''
__SCREAMING_SNAKE_CASE : Optional[Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(snake_case ) , desc=snake_case , leave=snake_case ):
__SCREAMING_SNAKE_CASE : str = process_run_single(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
__SCREAMING_SNAKE_CASE : List[str] = single_run_metrics[target_metric_key]
if not math.isnan(snake_case ):
metrics.append(snake_case )
results.append(snake_case )
outcome += "✓"
else:
outcome += "✘"
__SCREAMING_SNAKE_CASE : str = F'''\33[2K\r{outcome}'''
if len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__SCREAMING_SNAKE_CASE : Optional[Any] = round(mean_metrics[target_metric_key] , 2 )
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{outcome} {mean_target}'''
if len(snake_case ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
print(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = variation
return mean_metrics
else:
print(snake_case )
return {variation_key: variation, target_metric_key: nan}
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
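# Turn the collected per-variation metrics into a DataFrame, add a diff % column relative to the
# baseline variation, and print both a GitHub-flavored and a console-friendly table.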
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = pd.DataFrame(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = '''variation'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''diff_%'''
__SCREAMING_SNAKE_CASE : str = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__SCREAMING_SNAKE_CASE : List[str] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(snake_case ):
# as a fallback, use the minimal value as the sentinel
__SCREAMING_SNAKE_CASE : Optional[Any] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
__SCREAMING_SNAKE_CASE : List[Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__SCREAMING_SNAKE_CASE : Union[str, Any] = df.reindex(snake_case , axis='''columns''' ) # reorder cols
# capitalize
__SCREAMING_SNAKE_CASE : str = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
    __SCREAMING_SNAKE_CASE : Any = df.rename(lambda c : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    __SCREAMING_SNAKE_CASE : int = df.rename(lambda c : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
__SCREAMING_SNAKE_CASE : int = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=snake_case , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=snake_case , floatfmt='''.2f''' )]
print('''\n\n'''.join(snake_case ) )
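# Entry point: parse arguments, expand the cartesian product of variations, benchmark each one,
# and report the aggregated results.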
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=snake_case , type=snake_case , required=snake_case , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=snake_case , type=snake_case , nargs='''+''' , required=snake_case , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=snake_case , type=snake_case , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=snake_case , type=snake_case , required=snake_case , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=snake_case , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=snake_case , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=snake_case , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=snake_case , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : str = args.output_dir
Path(snake_case ).mkdir(exist_ok=snake_case )
__SCREAMING_SNAKE_CASE : int = get_base_command(snake_case , snake_case )
# split each dimension into its --foo variations
    __SCREAMING_SNAKE_CASE : Optional[Any] = [list(map(str.strip , re.split(R'''\|''' , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(map(str.strip , map(''' '''.join , itertools.product(*snake_case ) ) ) )
    __SCREAMING_SNAKE_CASE : Optional[Any] = max(len(x ) for x in variations )
# split wanted keys
__SCREAMING_SNAKE_CASE : List[Any] = args.report_metric_keys.split()
# capture prints into a log file for convenience
__SCREAMING_SNAKE_CASE : Any = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__SCREAMING_SNAKE_CASE : str = Tee(snake_case )
print(F'''\n*** Running {len(snake_case )} benchmarks:''' )
print(F'''Base command: {" ".join(snake_case )}''' )
__SCREAMING_SNAKE_CASE : str = '''variation'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for id, variation in enumerate(tqdm(snake_case , desc='''Total completion: ''' , leave=snake_case ) ):
__SCREAMING_SNAKE_CASE : int = base_cmd + variation.split()
results.append(
process_run(
id + 1 , snake_case , snake_case , snake_case , snake_case , args.target_metric_key , snake_case , args.repeat_times , snake_case , args.verbose , ) )
process_results(snake_case , args.target_metric_key , snake_case , args.base_variation , snake_case )
if __name__ == "__main__":
main()
| 131 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Dict = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
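# Configuration class for the decoder-only XGLM model; the defaults correspond to the
# facebook/xglm-564M checkpoint.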
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """xglm"""
__lowercase = ["""past_key_values"""]
__lowercase = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , lowerCAmelCase_=25_60_08 , lowerCAmelCase_=20_48 , lowerCAmelCase_=10_24 , lowerCAmelCase_=40_96 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = d_model
_snake_case = ffn_dim
_snake_case = num_layers
_snake_case = attention_heads
_snake_case = activation_function
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = layerdrop
_snake_case = init_std
_snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case = use_cache
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 495 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
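# Agent tool wrapping ViLT visual question answering: preprocess (image, question), run the model,
# and decode the highest-scoring answer label.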
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """dandelin/vilt-b32-finetuned-vqa"""
__lowercase = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
__lowercase = """image_qa"""
__lowercase = AutoProcessor
__lowercase = AutoModelForVisualQuestionAnswering
__lowercase = ["""image""", """text"""]
__lowercase = ["""text"""]
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return self.pre_processor(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='pt' )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
with torch.no_grad():
return self.model(**lowerCAmelCase_ ).logits
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 495 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch construction `steps` times to the initial list of vectors."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle in degrees."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the vectors as a polyline with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 710 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
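# BLIP-2 is described by three nested configs: a ViT-style vision encoder, a Q-Former bridge,
# and a language model (OPT by default in this file).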
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'blip_2_vision_model'
def __init__( self , SCREAMING_SNAKE_CASE__=14_08 , SCREAMING_SNAKE_CASE__=61_44 , SCREAMING_SNAKE_CASE__=39 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=14 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0_0001 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=1e-10 , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = hidden_size
_snake_case : int = intermediate_size
_snake_case : int = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : Tuple = patch_size
_snake_case : Optional[int] = image_size
_snake_case : Tuple = initializer_range
_snake_case : List[str] = attention_dropout
_snake_case : Any = layer_norm_eps
_snake_case : int = hidden_act
_snake_case : List[Any] = qkv_bias
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
_snake_case : List[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'blip_2_qformer'
def __init__( self , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=14_08 , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case : int = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : str = hidden_act
_snake_case : Dict = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : Tuple = initializer_range
_snake_case : str = layer_norm_eps
_snake_case : Optional[int] = position_embedding_type
_snake_case : Any = cross_attention_frequency
_snake_case : int = encoder_hidden_size
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : Union[str, Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
_snake_case : Optional[Any] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'blip-2'
SCREAMING_SNAKE_CASE_ = True
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
if vision_config is None:
_snake_case : Any = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
_snake_case : Union[str, Any] = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
_snake_case : str = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_snake_case : Union[str, Any] = BlipaVisionConfig(**SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = BlipaQFormerConfig(**SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_snake_case : Union[str, Any] = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = self.text_config.tie_word_embeddings
_snake_case : Optional[int] = self.text_config.is_encoder_decoder
_snake_case : Tuple = num_query_tokens
_snake_case : Tuple = self.vision_config.hidden_size
_snake_case : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_snake_case : List[str] = 1.0
_snake_case : int = 0.02
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE__ , )
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Any = copy.deepcopy(self.__dict__ )
_snake_case : Union[str, Any] = self.vision_config.to_dict()
_snake_case : Optional[int] = self.qformer_config.to_dict()
_snake_case : str = self.text_config.to_dict()
_snake_case : Optional[Any] = self.__class__.model_type
return output
| 519 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__lowerCamelCase : str = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( _UpperCAmelCase ):
A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : str , **UpperCamelCase_ : Any ) -> Dict:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCamelCase_ : str = deprecated_arg[3:]
setattr(self , A_ , not kwargs.pop(A_ ) )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
lowerCamelCase_ : List[Any] = kwargs.pop('''torchscript''' , self.torchscript )
lowerCamelCase_ : Tuple = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
lowerCamelCase_ : Any = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**A_ )
A = field(default=_UpperCAmelCase ,metadata={"help": "Trace the models using torchscript"} )
A = field(default=_UpperCAmelCase ,metadata={"help": "Print Xla/PyTorch tpu metrics"} )
A = field(
default="O1" ,metadata={
"help": (
"For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} ,)
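    # Resolve the benchmark device once and cache it: CPU when CUDA is disabled, an XLA device on
    # TPU, otherwise CUDA together with the number of visible GPUs.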
@cached_property
def __UpperCamelCase ( self : List[str] ) -> Tuple["torch.device", int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
lowerCamelCase_ : Tuple = torch.device('''cpu''' )
lowerCamelCase_ : Union[str, Any] = 0
elif is_torch_tpu_available():
lowerCamelCase_ : Tuple = xm.xla_device()
lowerCamelCase_ : int = 0
else:
lowerCamelCase_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowerCamelCase_ : Any = torch.cuda.device_count()
return device, n_gpu
@property
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __UpperCamelCase ( self : str ) -> "torch.device":
"""simple docstring"""
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
return self.n_gpu > 0
| 501 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Union[str, Any] = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    __lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564 | 0 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowercase :
pass
| 712 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ : Any = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : List[str] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : List[str] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Optional[int] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 497 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase = 1_2_8_0_2_2
lowercase = 1_2_8_0_2_8
@require_sentencepiece
class __A( UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MaMaaaTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
def lowercase__ ( self : int ):
super().setUp()
lowerCamelCase_ = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase_ = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
lowerCamelCase_ = Path(self.tmpdirname )
save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
lowerCamelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] , **__UpperCamelCase : Optional[int] ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowercase__ ( self : int ):
lowerCamelCase_ = """</s>"""
lowerCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def lowercase__ ( self : str ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<s>""" )
self.assertEqual(len(__UpperCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def lowercase__ ( self : Union[str, Any] ):
pass
def lowercase__ ( self : str ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [2, 3, 4, 5, 6] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
lowerCamelCase_ = tokenizer.convert_tokens_to_string(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , """This is a test""" )
@slow
def lowercase__ ( self : Optional[Any] ):
# fmt: off
lowerCamelCase_ = {"""input_ids""": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A( unittest.TestCase ):
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
    tgt_text = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
    expected_src_tokens = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowercase__ ( cls : Union[str, Any] ):
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
        cls.pad_token_id = 1
return cls
def lowercase__ ( self : Any ):
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 1_2_8_0_6_3 )
def lowercase__ ( self : int ):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["""<unk>"""] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("""en""" ) , vocab )
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = """en"""
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
def lowercase__ ( self : Optional[int] ):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def lowercase__ ( self : Any ):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = """en"""
lowerCamelCase_ = """fr"""
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase_ = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowerCamelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowerCamelCase_ = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowerCamelCase_ = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
# en_XX, A, test, EOS
"""input_ids""": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 1_2_8_0_0_6,
} , )
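# Illustrative sketch (an addition, not part of the original test file): the behaviour exercised
# above, outside of unittest. Assumes network access for the checkpoint; "MaMaaaTokenizer" is the
# name this file uses (upstream transformers calls it M2M100Tokenizer).
# tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ar")
# inputs = tok._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
# assert inputs["forced_bos_token_id"] == tok.get_lang_id("ar")  # 128006, as asserted above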
| 272 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
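# Illustrative usage sketch (an addition, not part of the original __init__): with the lazy import
# structure above, downstream code just imports the public names and the heavy submodules are only
# loaded on first attribute access (assumes the torch and vision extras are installed).
# from transformers import BlipProcessor, BlipForConditionalGeneration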
| 272 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster ( vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('float64', [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('int32' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('float', [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('float', [dim] )
        vb = tf.placeholder('float', [dim] )
        # tf.sub was renamed tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb ), 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('float', [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances, 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 1_00
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location} )
        # Return centroids and assignments (outside the iteration loop, so all
        # noofiterations EM passes run before the results are read back)
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
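# Minimal demo (an addition for illustration; assumes the TF1-style graph/session API used above
# is available, e.g. TensorFlow 1.x or tf.compat.v1 bound to the name `tf`):
if __name__ == "__main__":
    sample_vectors = array(
        [[1.0, 1.0], [1.5, 2.0], [1.0, 0.6], [9.0, 11.0], [8.0, 8.0], [9.0, 9.0]]
    )
    found_centroids, found_assignments = tf_k_means_cluster(sample_vectors, 2)
    print(found_centroids)
    print(found_assignments)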
| 110 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_ ( state_dict : List[Any] ):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k, None )
def rename_keys ( s_dict : Union[str, Any] ):
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv" )] = s_dict.pop(key )
def make_linear_from_emb ( emb : Union[str, Any] ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms ( checkpoint_path : str, pytorch_dump_folder_path : str ):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu' )
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes ), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=2_00, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict, strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase_ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
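# Example invocation (hypothetical paths), matching the CLI flags defined above:
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/checkpoint_best.pt \
#       --pytorch_dump_folder_path ./s2t_converted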
| 110 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class lowerCamelCase_ ( __snake_case ):
a__ = '''blip_text_model'''
def __init__( self , __lowerCAmelCase=3_0_5_2_4 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=8 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0 , __lowerCAmelCase=1_0_2 , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , sep_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :Any = vocab_size
__magic_name__ :Any = hidden_size
__magic_name__ :List[Any] = encoder_hidden_size
__magic_name__ :List[Any] = intermediate_size
__magic_name__ :Dict = projection_dim
__magic_name__ :List[Any] = hidden_dropout_prob
__magic_name__ :Tuple = num_hidden_layers
__magic_name__ :List[str] = num_attention_heads
__magic_name__ :List[str] = max_position_embeddings
__magic_name__ :int = layer_norm_eps
__magic_name__ :int = hidden_act
__magic_name__ :List[str] = initializer_range
__magic_name__ :List[str] = attention_probs_dropout_prob
__magic_name__ :List[Any] = is_decoder
__magic_name__ :int = use_cache
@classmethod
def A ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowerCAmelCase )
__magic_name__ :Optional[Any] = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__magic_name__ :Any = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class lowerCamelCase_ ( __snake_case ):
a__ = '''blip_vision_model'''
def __init__( self , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_8_4 , __lowerCAmelCase=1_6 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1E-5 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1E-10 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
__magic_name__ :List[str] = hidden_size
__magic_name__ :Optional[int] = intermediate_size
__magic_name__ :Optional[int] = projection_dim
__magic_name__ :List[str] = num_hidden_layers
__magic_name__ :List[str] = num_attention_heads
__magic_name__ :int = patch_size
__magic_name__ :Tuple = image_size
__magic_name__ :Dict = initializer_range
__magic_name__ :Any = attention_dropout
__magic_name__ :Union[str, Any] = layer_norm_eps
__magic_name__ :str = hidden_act
@classmethod
def A ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__magic_name__ :List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class lowerCamelCase_ ( __snake_case ):
a__ = '''blip'''
a__ = True
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2.6592 , __lowerCAmelCase=2_5_6 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if text_config is None:
__magic_name__ :Any = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
__magic_name__ :List[Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
__magic_name__ :Optional[Any] = BlipTextConfig(**__lowerCAmelCase )
__magic_name__ :Tuple = BlipVisionConfig(**__lowerCAmelCase )
__magic_name__ :Tuple = self.vision_config.hidden_size
__magic_name__ :Optional[int] = projection_dim
__magic_name__ :Optional[Any] = logit_scale_init_value
__magic_name__ :int = 1.0
__magic_name__ :List[str] = 0.02
__magic_name__ :List[str] = image_text_hidden_size
@classmethod
def A ( cls , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = copy.deepcopy(self.__dict__ )
__magic_name__ :Any = self.text_config.to_dict()
__magic_name__ :str = self.vision_config.to_dict()
__magic_name__ :Any = self.__class__.model_type
return output
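# Illustrative sketch (an addition, not from the original file): how these three configs are
# composed in upstream transformers, where they are named BlipTextConfig, BlipVisionConfig and
# BlipConfig (the classes above carry obfuscated names, so treat these identifiers as assumptions).
# text_cfg, vision_cfg = BlipTextConfig(), BlipVisionConfig()
# blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)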
| 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path ( pred_path , tgt_path , save_path=None , **kwargs ):
    '''simple docstring'''
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
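# Example invocation (hypothetical file names); fire exposes calculate_rouge_path as a CLI:
#   python rouge_cli.py predictions.txt gold.txt --save_path rouge.json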
| 634 | 0 |
'''simple docstring'''
import re
def dna ( dna: str ) -> str:
    '''simple docstring'''
    if len(re.findall("[ATCG]" , dna ) ) != len(dna ):
        raise ValueError("Invalid Strand" )
    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
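# Illustrative check (an addition, not from the original file), using the dna() helper above:
# print(dna("ATCGATCG"))  # -> TAGCTAGC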
| 707 |
import string
import numpy
def greatest_common_divisor ( a: int , b: int ) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a , a )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowerCAmelCase__ = numpy.vectorize(lambda snake_case_ : x % 36 )
lowerCAmelCase__ = numpy.vectorize(snake_case_ )
def __init__( self , UpperCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = self.modulus(UpperCAmelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowercase_ = encrypt_key.shape[0]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self.key_string.index(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self.key_string[round(UpperCAmelCase )]
def A__ ( self ) -> None:
'''simple docstring'''
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = len(self.key_string )
if greatest_common_divisor(UpperCAmelCase , len(self.key_string ) ) != 1:
lowercase_ = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = [char for char in text.upper() if char in self.key_string]
lowercase_ = chars[-1]
while len(UpperCAmelCase ) % self.break_key != 0:
chars.append(UpperCAmelCase )
return "".join(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.process_text(text.upper() )
lowercase_ = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(UpperCAmelCase ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(self.encrypt_key.dot(UpperCAmelCase ) ).T.tolist()[
0
]
lowercase_ = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def A__ ( self ) -> numpy.ndarray:
'''simple docstring'''
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowercase_ = i
break
lowercase_ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCAmelCase ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.make_decrypt_key()
lowercase_ = self.process_text(text.upper() )
lowercase_ = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(UpperCAmelCase ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(decrypt_key.dot(UpperCAmelCase ) ).T.tolist()[0]
lowercase_ = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = int(input("Enter the order of the encryption key: " ) )
lowercase_ = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(__lowerCamelCase ):
lowercase_ = [int(__lowerCamelCase ) for x in input().split()]
hill_matrix.append(__lowerCamelCase )
lowercase_ = HillCipher(numpy.array(__lowerCamelCase ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
lowercase_ = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
lowercase_ = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(__lowerCamelCase ) )
elif option == "2":
lowercase_ = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(__lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
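# Illustrative, non-interactive sketch (an addition): the cipher class above instantiated directly,
# mirroring what main() does interactively. Assumes the class corresponds to HillCipher, the name
# main() already uses; the 2x2 key below has determinant 7, which is coprime with 36.
# hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
# print(hc.encrypt("testmessage"))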
| 601 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCamelCase_ = logging.get_logger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Tuple , **UpperCAmelCase__ : int ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fpaa_opt_level = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCAmelCase__ )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
lowerCamelCase_ = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return self.n_gpu > 0
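# Minimal usage sketch (an addition; the class and field names above are obfuscated, so this uses
# the upstream name PyTorchBenchmarkArguments that these arguments correspond to):
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.device, args.n_gpu)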
| 92 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> str:
a_ : Tuple = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
a_ : Any = downstream_dict["projector.weight"]
a_ : Dict = downstream_dict["projector.bias"]
a_ : Tuple = downstream_dict["model.post_net.linear.weight"]
a_ : int = downstream_dict["model.post_net.linear.bias"]
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> List[Any]:
a_ : List[str] = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = downstream_dict["model.linear.weight"]
a_ : List[Any] = downstream_dict["model.linear.bias"]
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
a_ : int = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE__, config=SCREAMING_SNAKE_CASE__ )
a_ : Any = downstream_dict["connector.weight"]
a_ : Tuple = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ : List[str] = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ : int = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ : Any = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ : str = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ : List[str] = downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Tuple:
a_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__, map_location="cpu" )
a_ : List[str] = checkpoint["Downstream"]
a_ : Union[str, Any] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, do_normalize=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ : int = convert_classification(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ : Any = convert_diarization(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith("ForXVector" ):
a_ : Any = convert_xvector(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ : Tuple = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 237 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCamelCase = """pt"""
elif is_tf_available():
_UpperCamelCase = """tf"""
else:
_UpperCamelCase = """jax"""
class lowerCamelCase__ ( _snake_case, unittest.TestCase ):
'''simple docstring'''
A__ = ByTaTokenizer
A__ = False
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowercase__ ( self : Any , **__A : Union[str, Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def lowercase__ ( self : Tuple , __A : Dict , __A : Union[str, Any]=False , __A : Optional[int]=20 , __A : str=5 ) -> Tuple[str, list]:
'''simple docstring'''
lowerCAmelCase__ = []
for i in range(len(lowerCAmelCase__ ) ):
try:
lowerCAmelCase__ = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ = list(filter(lambda __A : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , lowerCAmelCase__ ) )
lowerCAmelCase__ = list(filter(lambda __A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase__ ) , lowerCAmelCase__ ) )
if max_length is not None and len(lowerCAmelCase__ ) > max_length:
lowerCAmelCase__ = toks[:max_length]
if min_length is not None and len(lowerCAmelCase__ ) < min_length and len(lowerCAmelCase__ ) > 0:
while len(lowerCAmelCase__ ) < min_length:
lowerCAmelCase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
if " " not in output_txt and len(lowerCAmelCase__ ) > 1:
lowerCAmelCase__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase__ )
)
if with_prefix_space:
lowerCAmelCase__ = """ """ + output_txt
lowerCAmelCase__ = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
return output_txt, output_ids
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCAmelCase__ = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = """Unicode €."""
lowerCAmelCase__ = tokenizer(lowerCAmelCase__ )
lowerCAmelCase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , lowerCAmelCase__ )
# decoding
lowerCAmelCase__ = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , """Unicode €.</s>""" )
lowerCAmelCase__ = tokenizer("""e è é ê ë""" )
lowerCAmelCase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , lowerCAmelCase__ )
# decoding
lowerCAmelCase__ = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCAmelCase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCAmelCase__ = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
if FRAMEWORK != "jax":
lowerCAmelCase__ = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase__ = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowerCAmelCase__ )
self.assertIn("""attention_mask""" , lowerCAmelCase__ )
self.assertNotIn("""decoder_input_ids""" , lowerCAmelCase__ )
self.assertNotIn("""decoder_attention_mask""" , lowerCAmelCase__ )
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCAmelCase__ = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ["""A long paragraph for summarization. </s>"""]
lowerCAmelCase__ = ["""Summary of the text. </s>"""]
# fmt: off
lowerCAmelCase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCAmelCase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCAmelCase__ = tokenizer(lowerCAmelCase__ , text_target=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch["""input_ids"""][0] )
self.assertEqual(lowerCAmelCase__ , batch["""labels"""][0] )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase__ = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
lowerCAmelCase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCAmelCase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCAmelCase__ = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase__ = json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase__ = json.load(lowerCAmelCase__ )
lowerCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(125 )]
lowerCAmelCase__ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCAmelCase__ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(lowerCAmelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ = tokenizer_class.from_pretrained(
lowerCAmelCase__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCAmelCase__ )]
lowerCAmelCase__ = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = tokenizer_class.from_pretrained(lowerCAmelCase__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCAmelCase__ = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
for attr in attributes_list:
setattr(lowerCAmelCase__ , attr + """_id""" , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + """_id""" ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , attr + """_id""" , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + """_id""" ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens_ids""" ) , [] )
setattr(lowerCAmelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 701 |
'''simple docstring'''
import argparse
import datetime
def zeller( date_input : str ) -> str:
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response = F'''Your date {date_input}, is a {days[str(f)]}!'''
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
_UpperCamelCase = parser.parse_args()
zeller(args.date_input)
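# Example run (an illustration): 31 January 2010 was a Sunday.
#   python zellers_congruence.py "01-31-2010"
#   -> Your date 01-31-2010, is a Sunday!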
| 211 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1_024 , num_mel_bins=128 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
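# Hedged usage sketch (an addition): instantiating the config above with its defaults.
# config = ASTConfig()  # name as restored above; upstream this is transformers.ASTConfig
# assert config.num_mel_bins == 128 and config.patch_size == 16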
| 627 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_mobilevit"""] = ["""MobileViTFeatureExtractor"""]
    _import_structure["""image_processing_mobilevit"""] = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
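# Illustrative usage sketch (an addition, not part of the original __init__): downstream code
# imports the public names and the matching submodule is loaded lazily on first attribute access
# (assumes the torch and vision extras are installed).
# from transformers import MobileViTImageProcessor, MobileViTForImageClassification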
| 79 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
_lowerCAmelCase : List[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
_lowerCAmelCase : List[str] = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
_lowerCAmelCase : Optional[Any] = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
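    # Directory of the example script; used below to install its requirements.txt on the cluster.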
_lowerCAmelCase : List[str] = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
| 694 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase__ =42
# setable values
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
@classmethod
def __UpperCamelCase ( cls : Tuple , a : CommonSchedulerState , a : jnp.ndarray , a : jnp.ndarray ) -> Dict:
"""simple docstring"""
return cls(common=a , init_noise_sigma=a , timesteps=a )
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
lowerCamelCase__ =[e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase__ =42
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return True
@register_to_config
def __init__( self : str , a : int = 1000 , a : float = 0.0001 , a : float = 0.02 , a : str = "linear" , a : Optional[jnp.ndarray] = None , a : str = "fixed_small" , a : bool = True , a : str = "epsilon" , a : jnp.dtype = jnp.floataa , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = dtype
def __UpperCamelCase ( self : Any , a : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
SCREAMING_SNAKE_CASE : Optional[int] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : Tuple = jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=a , init_noise_sigma=a , timesteps=a , )
def __UpperCamelCase ( self : List[Any] , a : DDPMSchedulerState , a : jnp.ndarray , a : Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def __UpperCamelCase ( self : Optional[int] , a : DDPMSchedulerState , a : int , a : Tuple = () ) -> DDPMSchedulerState:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE : Dict = (jnp.arange(0 , a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=a , timesteps=a , )
def __UpperCamelCase ( self : Optional[Any] , a : DDPMSchedulerState , a : List[str] , a : Any=None , a : Dict=None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE : int = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE : str = jnp.clip(a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE : Any = jnp.log(jnp.clip(a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE : int = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE : str = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE : Any = variance
SCREAMING_SNAKE_CASE : int = state.common.betas[t]
SCREAMING_SNAKE_CASE : Dict = (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def __UpperCamelCase ( self : Any , a : DDPMSchedulerState , a : jnp.ndarray , a : int , a : jnp.ndarray , a : Optional[jax.random.KeyArray] = None , a : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = timestep
if key is None:
SCREAMING_SNAKE_CASE : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.split(a , sample.shape[1] , axis=1 )
else:
SCREAMING_SNAKE_CASE : int = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE : Tuple = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE : Tuple = jnp.clip(a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE : int = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(a , num=1 )
SCREAMING_SNAKE_CASE : Dict = jax.random.normal(a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(a , a , predicted_variance=a ) ** 0.5) * noise
SCREAMING_SNAKE_CASE : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=a , state=a )
def __UpperCamelCase ( self : Union[str, Any] , a : DDPMSchedulerState , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , a , a , a )
def __UpperCamelCase ( self : str , a : DDPMSchedulerState , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , a , a , a )
def __len__( self : int ) -> Any:
"""simple docstring"""
        return self.config.num_train_timesteps
| 25 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case_ : str = [1, 2, 3]
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = [1, 2]
snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ : Tuple = [2, 3]
snake_case_ : str = {"a": 2, "b": 3}
snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
| 653 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = IFInpaintingPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def snake_case__ ( self : List[Any] ) ->Any:
'''simple docstring'''
return self._get_dummy_components()
def snake_case__ ( self : Dict , lowercase__ : List[str] , lowercase__ : str=0 ) ->Dict:
'''simple docstring'''
if str(UpperCamelCase_ ).startswith("mps" ):
_UpperCamelCase : List[str] = torch.manual_seed(UpperCamelCase_ )
else:
_UpperCamelCase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_UpperCamelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
_UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
_UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case__ ( self : Tuple ) ->str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def snake_case__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case__ ( self : int ) ->Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
self._test_save_load_local()
def snake_case__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 704 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ : Any = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ : Union[str, Any] = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ : Optional[int] = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : Optional[int] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
lowerCAmelCase_ : List[Any] = """3.0.12"""
lowerCAmelCase_ : Optional[int] = None
def __A ( ) -> Optional[Any]:
'''simple docstring'''
global _logger
_UpperCamelCase : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
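# Exception raised when the lock cannot be acquired within the timeout (exported as "Timeout" in __all__ above).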
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , lowercase__ : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = lock_file
return None
def __str__( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[int] = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
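# Context-manager proxy returned by acquire(); it releases the lock in __exit__.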
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , lowercase__ : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = lock
return None
def __enter__( self : Any ) ->List[Any]:
'''simple docstring'''
return self.lock
def __exit__( self : Union[str, Any] , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : int ) ->str:
'''simple docstring'''
self.lock.release()
return None
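# Base file-lock class: reference-counted acquire()/release(), timeout handling, and hashing of over-long lock filenames.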
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , lowercase__ : Tuple , lowercase__ : Any=-1 , lowercase__ : int=None ) ->int:
'''simple docstring'''
_UpperCamelCase : int = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
_UpperCamelCase : str = self.hash_filename_if_too_long(lowercase__ , lowercase__ )
# The path to the lock file.
_UpperCamelCase : Optional[int] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_UpperCamelCase : Optional[int] = None
# The default timeout value.
_UpperCamelCase : Any = timeout
# We use this lock primarily for the lock counter.
_UpperCamelCase : Tuple = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_UpperCamelCase : Any = 0
return None
@property
def snake_case__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
return self._lock_file
@property
def snake_case__ ( self : int ) ->str:
'''simple docstring'''
return self._timeout
@timeout.setter
def snake_case__ ( self : Tuple , lowercase__ : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : str = float(lowercase__ )
return None
def snake_case__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
raise NotImplementedError()
def snake_case__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
@property
def snake_case__ ( self : str ) ->Tuple:
'''simple docstring'''
return self._lock_file_fd is not None
def snake_case__ ( self : Optional[Any] , lowercase__ : Optional[int]=None , lowercase__ : str=0.0_5 ) ->str:
'''simple docstring'''
if timeout is None:
_UpperCamelCase : Tuple = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_UpperCamelCase : List[str] = id(self )
_UpperCamelCase : List[str] = self._lock_file
_UpperCamelCase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(lowercase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def snake_case__ ( self : Dict , lowercase__ : str=False ) ->Tuple:
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_UpperCamelCase : Tuple = id(self )
_UpperCamelCase : str = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
_UpperCamelCase : List[str] = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : Optional[int] ) ->str:
'''simple docstring'''
self.acquire()
return self
def __exit__( self : List[Any] , lowercase__ : int , lowercase__ : Dict , lowercase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
self.release()
return None
def __del__( self : Dict ) ->int:
'''simple docstring'''
self.release(force=lowercase__ )
return None
def snake_case__ ( self : Tuple , lowercase__ : str , lowercase__ : int ) ->str:
'''simple docstring'''
_UpperCamelCase : Any = os.path.basename(lowercase__ )
if len(lowercase__ ) > max_length and max_length > 0:
_UpperCamelCase : Optional[Any] = os.path.dirname(lowercase__ )
_UpperCamelCase : Tuple = str(hash(lowercase__ ) )
_UpperCamelCase : Dict = filename[: max_length - len(lowercase__ ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(lowercase__ , lowercase__ )
else:
return path
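# Windows file lock based on msvcrt.locking (exported as "WindowsFileLock").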
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , lowercase__ : str , lowercase__ : int=-1 , lowercase__ : Optional[Any]=None ) ->Optional[Any]:
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(lowercase__ , timeout=lowercase__ , max_filename_length=lowercase__ )
_UpperCamelCase : int = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def snake_case__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_UpperCamelCase : str = os.open(self._lock_file , lowercase__ )
except OSError:
pass
else:
try:
msvcrt.locking(lowercase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowercase__ )
else:
_UpperCamelCase : Union[str, Any] = fd
return None
def snake_case__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[int] = self._lock_file_fd
_UpperCamelCase : Optional[int] = None
msvcrt.locking(lowercase__ , msvcrt.LK_UNLCK , 1 )
os.close(lowercase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
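# Unix file lock based on fcntl.flock (exported as "UnixFileLock").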
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase__ : Any , lowercase__ : Union[str, Any]=-1 , lowercase__ : Dict=None ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = os.statvfs(os.path.dirname(lowercase__ ) ).f_namemax
super().__init__(lowercase__ , timeout=lowercase__ , max_filename_length=lowercase__ )
def snake_case__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_UpperCamelCase : str = os.open(self._lock_file , lowercase__ )
try:
fcntl.flock(lowercase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowercase__ )
else:
_UpperCamelCase : Dict = fd
return None
def snake_case__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCamelCase : List[str] = self._lock_file_fd
_UpperCamelCase : List[Any] = None
fcntl.flock(lowercase__ , fcntl.LOCK_UN )
os.close(lowercase__ )
return None
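# Soft file lock that only creates the lock file exclusively; platform independent (exported as "SoftFileLock").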
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def snake_case__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_UpperCamelCase : Any = os.open(self._lock_file , lowercase__ )
except OSError:
pass
else:
_UpperCamelCase : str = fd
return None
def snake_case__ ( self : Dict ) ->int:
'''simple docstring'''
os.close(self._lock_file_fd )
_UpperCamelCase : Dict = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ : List[Any] = None
if msvcrt:
lowerCAmelCase_ : Dict = WindowsFileLock
elif fcntl:
lowerCAmelCase_ : int = UnixFileLock
else:
lowerCAmelCase_ : Tuple = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 204 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
'''simple docstring'''
@staticmethod
def _snake_case ( *lowerCamelCase : Optional[int] , **lowerCamelCase : int ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
_snake_case : Any = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
__lowercase = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def _snake_case ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = object_detector(examples[0] , threshold=0.0 )
__lowercase = len(__snake_case )
self.assertGreater(__snake_case , 0 )
self.assertEqual(
__snake_case , [
{
"score": ANY(__snake_case ),
"label": ANY(__snake_case ),
"box": {"xmin": ANY(__snake_case ), "ymin": ANY(__snake_case ), "xmax": ANY(__snake_case ), "ymax": ANY(__snake_case )},
}
for i in range(__snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
@require_torch
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
__lowercase = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
__lowercase = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = pipeline("zero-shot-object-detection" )
__lowercase = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
__lowercase = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
@require_torch
@slow
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = 0.2
__lowercase = pipeline("zero-shot-object-detection" )
__lowercase = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = 2
__lowercase = pipeline("zero-shot-object-detection" )
__lowercase = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 402 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
_SCREAMING_SNAKE_CASE = tuple[int, int]
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : set[int] , __snake_case : Mapping[EdgeT, int] )-> None:
snake_case = vertices
snake_case = {
(min(__snake_case ), max(__snake_case )): weight for edge, weight in edges.items()
}
def lowerCAmelCase ( self : Optional[Any] , __snake_case : EdgeT , __snake_case : int )-> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
snake_case = weight
def lowerCAmelCase ( self : str )-> Graph:
snake_case = Graph({min(self.vertices )} , {} )
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
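        # Repeatedly add the cheapest edge connecting the current subgraph to a new vertex (Prim's algorithm).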
while len(subgraph.vertices ) < len(self.vertices ):
snake_case = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
snake_case = edge
snake_case = weight
subgraph.add_edge(__snake_case , __snake_case )
return subgraph
def __lowerCamelCase ( __lowerCAmelCase : str = "p107_network.txt" ) -> int:
snake_case = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
snake_case = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
snake_case = {}
snake_case = 42
snake_case = 42
snake_case = 42
with open(__lowerCAmelCase ) as f:
snake_case = f.read().strip().split("""\n""" )
snake_case = [line.split(""",""" ) for line in data]
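    # Build the edge/weight mapping from the adjacency matrix; "-" marks a missing edge.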
for edgea in range(1 , len(__lowerCAmelCase ) ):
for edgea in range(__lowerCAmelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
snake_case = int(adjaceny_matrix[edgea][edgea] )
snake_case = Graph(set(range(len(__lowerCAmelCase ) ) ) , __lowerCAmelCase )
snake_case = graph.prims_algorithm()
snake_case = sum(graph.edges.values() )
snake_case = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 369 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def _A ( self: int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self: Tuple ):
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self: Dict ):
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _A ( self: List[str] ):
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__UpperCamelCase , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 346 |
from collections.abc import Generator
from math import sin
def __snake_case ( _UpperCamelCase ) -> bytes:
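    # Reorder a 32-character bit string from big-endian to little-endian byte order.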
if len(_UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __snake_case ( _UpperCamelCase ) -> bytes:
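    # Format a non-negative 32-bit integer as 8 hexadecimal characters in little-endian byte order.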
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(_UpperCamelCase , '''08x''' )[-8:]
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def __snake_case ( _UpperCamelCase ) -> bytes:
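    # Convert the message to a bit string and apply MD5 padding: append "1", pad with "0" until length % 512 == 448, then append the 64-bit length.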
_a = b''''''
for char in message:
bit_string += format(_UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(_UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCamelCase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __snake_case ( _UpperCamelCase ) -> Generator[list[int], None, None]:
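    # Yield the padded bit string as 512-bit blocks, each split into sixteen little-endian 32-bit words.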
if len(_UpperCamelCase ) % 5_12 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(_UpperCamelCase ) , 5_12 ):
_a = bit_string[pos : pos + 5_12]
_a = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __snake_case ( _UpperCamelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(_UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCamelCase , 2 )
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
return (a + b) % 2**32
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
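    # Rotate a non-negative 32-bit integer left by `shift` bits.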
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __snake_case ( _UpperCamelCase ) -> bytes:
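    # Compute the MD5 digest of `message` and return it as 32 hexadecimal characters.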
_a = preprocess(_UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67_45_23_01
_a = 0XEF_CD_AB_89
_a = 0X98_BA_DC_FE
_a = 0X10_32_54_76
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(_UpperCamelCase ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(_UpperCamelCase , left_rotate_aa(_UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""YolosFeatureExtractor"""]
UpperCamelCase_ = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__ : int = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["""YolosFeatureExtractor"""]
a__ : List[str] = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 165 | 0 |
import requests
__A : Union[str, Any] = """YOUR API KEY"""
def __UpperCamelCase ( _A : Dict , _A : Dict = giphy_api_key ) ->list:
"""simple docstring"""
lowerCamelCase_ ="+".join(query.split() )
lowerCamelCase_ =f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
lowerCamelCase_ =requests.get(_A ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 718 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase__):
_UpperCamelCase:List[Any] = ["torch", "torchsde"]
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> List[Any]:
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def _snake_case ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Union[str, Any]:
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def _snake_case ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> str:
requires_backends(cls , ["""torch""", """torchsde"""] )
| 75 | 0 |
import argparse
A__: List[Any] = '''docs/source/_static/js/custom.js'''
def lowerCAmelCase_ ( A_):
with open(A_ ,encoding="utf-8" ,newline="\n") as f:
UpperCamelCase__: Tuple = f.readlines()
UpperCamelCase__: Dict = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion ="):
index += 1
UpperCamelCase__: Dict = F"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {"):
index += 1
# We go until the end
while not lines[index].startswith("}"):
index += 1
# We add the new version at the end
lines[index - 1] += F" \"v{version}\": \"v{version}\",\n"
with open(A_ ,"w" ,encoding="utf-8" ,newline="\n") as f:
f.writelines(A_)
if __name__ == "__main__":
A__: Any = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
A__: Any = parser.parse_args()
update_custom_js(args.version)
| 380 |
def lowerCAmelCase_ ( A_):
if not all(char in "01" for char in bin_string):
raise ValueError("Non-binary value was passed to the function")
if not bin_string:
raise ValueError("Empty string was passed to the function")
UpperCamelCase__: List[Any] = ""
while len(A_) % 3 != 0:
UpperCamelCase__: int = "0" + bin_string
UpperCamelCase__: Optional[int] = [
bin_string[index : index + 3]
for index in range(len(A_))
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
UpperCamelCase__: Union[str, Any] = 0
for index, val in enumerate(A_):
oct_val += int(2 ** (2 - index) * int(A_))
oct_string += str(A_)
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 380 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( UpperCAmelCase__ ):
_SCREAMING_SNAKE_CASE : int = "AutoTokenizer"
_SCREAMING_SNAKE_CASE : Optional[Any] = ["tokenizer"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None ):
super().__init__(_lowerCAmelCase )
a =speaker_embeddings
@classmethod
def lowerCAmelCase__ ( cls , _lowerCAmelCase , _lowerCAmelCase="speaker_embeddings_path.json" , **_lowerCAmelCase ):
if speaker_embeddings_dict_path is not None:
a =get_file_from_repo(
_lowerCAmelCase , _lowerCAmelCase , subfolder=kwargs.pop("""subfolder""" , _lowerCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , _lowerCAmelCase ) , force_download=kwargs.pop("""force_download""" , _lowerCAmelCase ) , proxies=kwargs.pop("""proxies""" , _lowerCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , _lowerCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , _lowerCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , _lowerCAmelCase ) , revision=kwargs.pop("""revision""" , _lowerCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'''`{os.path.join(_lowerCAmelCase , _lowerCAmelCase )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
a =None
else:
with open(_lowerCAmelCase ) as speaker_embeddings_json:
a =json.load(_lowerCAmelCase )
else:
a =None
a =AutoTokenizer.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
return cls(tokenizer=_lowerCAmelCase , speaker_embeddings=_lowerCAmelCase )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase="speaker_embeddings_path.json" , _lowerCAmelCase="speaker_embeddings" , _lowerCAmelCase = False , **_lowerCAmelCase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowerCAmelCase , _lowerCAmelCase , """v2""" ) , exist_ok=_lowerCAmelCase )
a ={}
a =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
a =self._load_voice_preset(_lowerCAmelCase )
a ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , _lowerCAmelCase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowerCAmelCase , )
a =os.path.join(_lowerCAmelCase , F'''{prompt_key}_{key}.npy''' )
a =tmp_dict
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self , _lowerCAmelCase = None , **_lowerCAmelCase ):
a =self.speaker_embeddings[voice_preset]
a ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
a =get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , _lowerCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , _lowerCAmelCase ) , force_download=kwargs.pop("""force_download""" , _lowerCAmelCase ) , proxies=kwargs.pop("""proxies""" , _lowerCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , _lowerCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , _lowerCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , _lowerCAmelCase ) , revision=kwargs.pop("""revision""" , _lowerCAmelCase ) , )
if path is None:
raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
a =np.load(_lowerCAmelCase )
return voice_preset_dict
def lowerCAmelCase__ ( self , _lowerCAmelCase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="pt" , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , **_lowerCAmelCase , ):
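        # Tokenize `text` and, if provided, attach a voice preset loaded from the preloaded speaker embeddings, a local `.npz` file, or a dict of arrays.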
if voice_preset is not None and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
a =self._load_voice_preset(_lowerCAmelCase )
else:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not voice_preset.endswith(""".npz""" ):
a =voice_preset + """.npz"""
a =np.load(_lowerCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowerCAmelCase , **_lowerCAmelCase )
a =BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
a =self.tokenizer(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
if voice_preset is not None:
a =voice_preset
return encoded_text
| 709 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Any = ["image_processor", "tokenizer"]
_SCREAMING_SNAKE_CASE : Optional[int] = "Pix2StructImageProcessor"
_SCREAMING_SNAKE_CASE : List[str] = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
a =False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 2_048 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
a =self.tokenizer
a =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
a =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , **_lowerCAmelCase )
else:
# add pixel_values and bbox
a =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , header_text=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and not self.image_processor.is_vqa:
a =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
if "attention_mask" in text_encoding:
a =text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
a =text_encoding.pop("""input_ids""" )
else:
a =None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def lowerCAmelCase__ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def lowerCAmelCase__ ( self ):
a =self.tokenizer.model_input_names
a =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 321 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Tuple = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs=None , **kwargs , ):
__lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCamelCase : Optional[Any] = kwargs.get('name_or_path')
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' if you are testing the model, this can safely be ignored')
__lowerCamelCase : Dict = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__lowerCamelCase : Optional[Any] = '<|endoftext|>' if eos_token is None else eos_token
__lowerCamelCase : int = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__lowerCamelCase : Dict = unk_token if pad_token is None else pad_token
__lowerCamelCase : List[Any] = eos_token if bos_token is None else bos_token
else:
__lowerCamelCase : Optional[int] = '<pad>' if pad_token is None else pad_token
__lowerCamelCase : int = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Tuple = do_lower_case
__lowerCamelCase : Optional[Any] = remove_space
__lowerCamelCase : List[Any] = keep_accents
__lowerCamelCase : Tuple = vocab_file
__lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
# Used for whitespace normalization in input texts
        # fmt: off
__lowerCamelCase : Any = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"[{''.join(map(chr ,list(range(0 ,9)) + list(range(1_1 ,3_2)) + list(range(1_2_7 ,1_6_0)) + [1_6_0, 1_7_3, 8_2_0_3]))}]")
def __getstate__( self : str):
__lowerCamelCase : str = self.__dict__.copy()
__lowerCamelCase : Optional[int] = None
return state
def __setstate__( self : List[str] ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase ( self : Any):
return len(self.sp_model)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : List[str] = self.non_printing_characters_re.sub('' ,SCREAMING_SNAKE_CASE__)
# Normalize whitespaces
__lowerCamelCase : Any = ''.join([char if char not in self.whitespaces else ' ' for char in text])
# NFC Unicode normalization
__lowerCamelCase : List[str] = unicodedata.normalize('NFC' ,SCREAMING_SNAKE_CASE__)
return text
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE__)
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : str):
return out_string
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Tuple = []
__lowerCamelCase : str = ''
__lowerCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : str = True
__lowerCamelCase : List[str] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : Any = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any = False):
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Optional[int] = self.preprocess_text(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.sp_model.encode(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : List[Any] = [self.preprocess_text(SCREAMING_SNAKE_CASE__) for t in text]
__lowerCamelCase : Optional[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__)
if return_tensors is True or return_tensors == "pt":
__lowerCamelCase : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE__)
return token_ids
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str):
return self.sp_model.decode(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : str = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
__lowerCamelCase : Any = (
F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(SCREAMING_SNAKE_CASE__) + F"{self.bos_token}Bot:"
)
return self.encode(text=SCREAMING_SNAKE_CASE__)
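# Self-contained sketch (not part of the tokenizer class above) of the text preprocessing
# it performs: strip non-printing control characters, map unusual Unicode whitespace to a
# plain space, then apply NFC normalization. The helper name and the whitespace set below
# are assumptions standing in for the tokenizer's own table of space/zero-width characters.
import re
import unicodedata

_NON_PRINTING_RE = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)
_WHITESPACE_CHARS = {chr(c) for c in range(0x2000, 0x200B)} | {"\u00a0", "\u205f", "\u3000"}


def normalize_gpt_sw3_text(text: str) -> str:
    text = _NON_PRINTING_RE.sub("", text)  # drop control characters
    text = "".join(" " if char in _WHITESPACE_CHARS else char for char in text)  # unify spaces
    return unicodedata.normalize("NFC", text)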
| 652 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
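# Hedged sketch of the lazy-import pattern used above: the package module is replaced by a
# _LazyModule that resolves a name from _import_structure only on first attribute access,
# so optional backends (tokenizers, torch, TF, Flax) are never imported eagerly. The toy
# class below mimics the idea without relying on transformers internals.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # import_structure maps a submodule name to the list of names it exports
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._name_to_module[attr])  # imported on demand
        return getattr(submodule, attr)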
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
"""simple docstring"""
hf_model.apply_weight_norm()
UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
UpperCAmelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCAmelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
else:
UpperCAmelCase = SpeechTaHifiGanConfig()
UpperCAmelCase = SpeechTaHifiGan(_snake_case )
UpperCAmelCase = torch.load(_snake_case )
load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )
UpperCAmelCase = np.load(_snake_case )
UpperCAmelCase = stats[0].reshape(-1 )
UpperCAmelCase = stats[1].reshape(-1 )
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
UpperCAmelCase = torch.from_numpy(_snake_case ).float()
model.save_pretrained(_snake_case )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
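# Hedged illustration (separate from the conversion script above) of why the weight copy is
# bracketed by apply_weight_norm()/remove_weight_norm(): while weight normalization is
# active, a conv layer exposes weight_g/weight_v tensors matching the original HiFi-GAN
# checkpoint keys, and removing the norm afterwards folds them back into a plain weight.
# Layer sizes below are arbitrary toy values.
import torch
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm


def _weight_norm_roundtrip_demo():
    conv = weight_norm(nn.Conv1d(4, 4, kernel_size=3))
    with torch.no_grad():
        conv.weight_g.copy_(torch.ones_like(conv.weight_g))   # stands in for checkpoint["...weight_g"]
        conv.weight_v.copy_(torch.randn_like(conv.weight_v))  # stands in for checkpoint["...weight_v"]
    remove_weight_norm(conv)  # folds weight_g/weight_v back into conv.weight
    return conv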
| 74 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Dict = StableUnCLIPPipeline
_SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_SCREAMING_SNAKE_CASE : Optional[int] = False
def a ( self : Tuple ) -> Any:
__snake_case = 32
__snake_case = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
__snake_case = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=SCREAMING_SNAKE_CASE_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__snake_case = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=SCREAMING_SNAKE_CASE_ , num_layers=1 , )
torch.manual_seed(0 )
__snake_case = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
__snake_case = StableUnCLIPImageNormalizer(embedding_dim=SCREAMING_SNAKE_CASE_ )
__snake_case = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
__snake_case = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=SCREAMING_SNAKE_CASE_ , layers_per_block=1 , upcast_attention=SCREAMING_SNAKE_CASE_ , use_linear_projection=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL()
__snake_case = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=0 ) -> Optional[Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def a ( self : int ) -> str:
__snake_case = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[Any]:
__snake_case = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> List[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
__snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
        __snake_case = pipe('anime turtle' , generator=SCREAMING_SNAKE_CASE_ , output_type='np' )
__snake_case = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 56 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an integer expression given in postfix (Reverse Polish) notation."""
    if not postfix_notation:
        return 0

    operations = {'+', '-', '*', '/'}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
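# Minimal sanity checks for the evaluator above, with expected values worked out by hand:
# ["2", "1", "+", "3", "*"] encodes (2 + 1) * 3 and ["4", "13", "5", "/", "+"] encodes
# 4 + 13 / 5 with the truncate-towards-zero division implemented above.
if __name__ == "__main__":
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
    assert evaluate_postfix(["4", "13", "5", "/", "+"]) == 6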
| 56 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = 42
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Tuple , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :int=("DownEncoderBlock2D",) , __UpperCamelCase :Dict=(64,) , __UpperCamelCase :str=2 , __UpperCamelCase :List[str]=32 , __UpperCamelCase :str="silu" , __UpperCamelCase :str=True , ):
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
__UpperCamelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(__UpperCamelCase ):
A = output_channel
A = block_out_channels[i]
A = i == len(__UpperCamelCase ) - 1
A = get_down_block(
__UpperCamelCase , num_layers=self.layers_per_block , in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__UpperCamelCase , resnet_groups=__UpperCamelCase , attention_head_dim=__UpperCamelCase , temb_channels=__UpperCamelCase , )
self.down_blocks.append(__UpperCamelCase )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__UpperCamelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCamelCase , temb_channels=__UpperCamelCase , )
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCamelCase , eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] , __UpperCamelCase , 3 , padding=1 )
A = False
def lowerCamelCase ( self :Tuple , __UpperCamelCase :str ):
A = x
A = self.conv_in(__UpperCamelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCamelCase :List[str] ):
def custom_forward(*__UpperCamelCase :Optional[Any] ):
return module(*__UpperCamelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCamelCase ) , __UpperCamelCase , use_reentrant=__UpperCamelCase )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCamelCase , use_reentrant=__UpperCamelCase )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCamelCase ) , __UpperCamelCase )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCamelCase )
else:
# down
for down_block in self.down_blocks:
A = down_block(__UpperCamelCase )
# middle
A = self.mid_block(__UpperCamelCase )
# post-process
A = self.conv_norm_out(__UpperCamelCase )
A = self.conv_act(__UpperCamelCase )
A = self.conv_out(__UpperCamelCase )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self :int , __UpperCamelCase :List[str]=3 , __UpperCamelCase :int=3 , __UpperCamelCase :str=("UpDecoderBlock2D",) , __UpperCamelCase :Tuple=(64,) , __UpperCamelCase :Union[str, Any]=2 , __UpperCamelCase :str=32 , __UpperCamelCase :Union[str, Any]="silu" , __UpperCamelCase :int="group" , ):
super().__init__()
A = layers_per_block
A = nn.Convad(
__UpperCamelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == "spatial" else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__UpperCamelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCamelCase , temb_channels=__UpperCamelCase , )
# up
A = list(reversed(__UpperCamelCase ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__UpperCamelCase ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(__UpperCamelCase ) - 1
A = get_up_block(
__UpperCamelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__UpperCamelCase , resnet_groups=__UpperCamelCase , attention_head_dim=__UpperCamelCase , temb_channels=__UpperCamelCase , resnet_time_scale_shift=__UpperCamelCase , )
self.up_blocks.append(__UpperCamelCase )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] , __UpperCamelCase )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCamelCase , eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] , __UpperCamelCase , 3 , padding=1 )
A = False
def lowerCamelCase ( self :str , __UpperCamelCase :Dict , __UpperCamelCase :Union[str, Any]=None ):
A = z
A = self.conv_in(__UpperCamelCase )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCamelCase :Union[str, Any] ):
def custom_forward(*__UpperCamelCase :Union[str, Any] ):
return module(*__UpperCamelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCamelCase , __UpperCamelCase , use_reentrant=__UpperCamelCase )
A = sample.to(__UpperCamelCase )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , use_reentrant=__UpperCamelCase )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCamelCase , __UpperCamelCase )
A = sample.to(__UpperCamelCase )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase )
else:
# middle
A = self.mid_block(__UpperCamelCase , __UpperCamelCase )
A = sample.to(__UpperCamelCase )
# up
for up_block in self.up_blocks:
A = up_block(__UpperCamelCase , __UpperCamelCase )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(__UpperCamelCase )
else:
A = self.conv_norm_out(__UpperCamelCase , __UpperCamelCase )
A = self.conv_act(__UpperCamelCase )
A = self.conv_out(__UpperCamelCase )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Tuple , __UpperCamelCase :List[Any] , __UpperCamelCase :List[str]=None , __UpperCamelCase :Union[str, Any]="random" , __UpperCamelCase :int=False , __UpperCamelCase :Dict=True ):
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
A = n_e
A = sane_index_shape
def lowerCamelCase ( self :Any , __UpperCamelCase :Union[str, Any] ):
A = inds.shape
assert len(__UpperCamelCase ) > 1
A = inds.reshape(ishape[0] , -1 )
A = self.used.to(__UpperCamelCase )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(__UpperCamelCase )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Union[str, Any] ):
A = inds.shape
assert len(__UpperCamelCase ) > 1
A = inds.reshape(ishape[0] , -1 )
A = self.used.to(__UpperCamelCase )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCamelCase )
return back.reshape(__UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 , 2 , 3 , 1 ).contiguous()
A = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(__UpperCamelCase , self.embedding.weight ) , dim=1 )
A = self.embedding(__UpperCamelCase ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A = self.remap_to_used(__UpperCamelCase )
A = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self :Dict , __UpperCamelCase :List[Any] , __UpperCamelCase :Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] , -1 ) # add batch axis
A = self.unmap_to_all(__UpperCamelCase )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(__UpperCamelCase )
if shape is not None:
A = z_q.view(__UpperCamelCase )
# reshape back to match original input shape
A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :int , __UpperCamelCase :int , __UpperCamelCase :Dict=False ):
A = parameters
A, A = torch.chunk(__UpperCamelCase , 2 , dim=1 )
A = torch.clamp(self.logvar , -30.0 , 20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape , generator=__UpperCamelCase , device=self.parameters.device , dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :Tuple=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
return self.mean
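# Hedged mini-example of the codebook lookup inside the vector quantizer above: every
# flattened latent vector is matched to its nearest embedding row (torch.cdist computes
# the (z - e)^2 distances mentioned in the comment) and replaced by it. Shapes are toy values.
def _codebook_lookup_demo():
    codebook = torch.randn(8, 4)  # n_e = 8 embeddings of dimension vq_embed_dim = 4
    z_flat = torch.randn(5, 4)    # 5 flattened latent vectors
    nearest = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
    return codebook[nearest]      # quantized latents, same shape as z_flat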
| 524 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether a digit tuple has the substring divisibility property of Project Euler 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(F"""{solution() = }""")
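# Worked check for is_substring_divisible above: 1406357289 is the example of a 0-9
# pandigital number with the substring divisibility property quoted in Project Euler 43
# (406, 063, 635, 357, 572, 728 and 289 are divisible by 2, 3, 5, 7, 11, 13 and 17
# respectively), so the predicate should accept its digit tuple.
if __name__ == "__main__":
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))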
| 524 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only."""
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'.*layer_(\d*).*', file)[1])
    layer_number -= 3
    return F'h.{layer_number}.' + key


def get_dtype_size(dtype):
    """Return the number of bytes used by one element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'[^\d](\d+)$', str(dtype))
    if bit_search is None:
        raise ValueError(F'`dtype` is not a valid dtype: {dtype}.')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
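# Quick worked examples for get_dtype_size above (bytes per element, as used when
# accumulating total_size for the sharded checkpoint index); guarded so they only run
# when this script is executed directly.
if __name__ == "__main__":
    assert get_dtype_size(torch.float16) == 2
    assert get_dtype_size(torch.float32) == 4
    assert get_dtype_size(torch.bool) == 1 / 8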
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
# Construct model
if bloom_config_file == "":
__lowerCamelCase : Union[str, Any] = BloomConfig()
else:
__lowerCamelCase : List[str] = BloomConfig.from_json_file(UpperCAmelCase_ )
if shard_model:
__lowerCamelCase : Dict = os.listdir(UpperCAmelCase_ )
__lowerCamelCase : Union[str, Any] = sorted(filter(lambda UpperCAmelCase_ : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase_ ) )
__lowerCamelCase : Any = {'weight_map': {}, 'metadata': {}}
__lowerCamelCase : int = 0
__lowerCamelCase : int = None
__lowerCamelCase : Dict = BloomConfig()
for j, file in enumerate(UpperCAmelCase_ ):
print('Processing file: {}'.format(UpperCAmelCase_ ) )
__lowerCamelCase : Optional[Any] = None
for i in range(UpperCAmelCase_ ):
# load all TP files
__lowerCamelCase : Optional[int] = file.replace('model_00' , F'model_0{i}' )
__lowerCamelCase : Any = torch.load(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
__lowerCamelCase : Dict = list(temp.keys() )
for key in keys:
__lowerCamelCase : Optional[Any] = temp.pop(UpperCAmelCase_ )
if tensors is None:
__lowerCamelCase : List[str] = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__lowerCamelCase : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__lowerCamelCase : str = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__lowerCamelCase : List[str] = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase_ , os.path.join(
UpperCAmelCase_ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__lowerCamelCase : Tuple = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__lowerCamelCase : str = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase_ ) ).zfill(5 ) )
__lowerCamelCase : List[Any] = BloomConfig()
__lowerCamelCase : List[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
__lowerCamelCase : str = total_size
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase_ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
__lowerCamelCase : Tuple = json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_ ) + '\n'
f.write(UpperCAmelCase_ )
else:
__lowerCamelCase : str = BloomModel(UpperCAmelCase_ )
__lowerCamelCase : List[Any] = os.listdir(UpperCAmelCase_ )
__lowerCamelCase : Tuple = sorted(filter(lambda UpperCAmelCase_ : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase_ ) )
__lowerCamelCase : List[str] = None
for i, file in enumerate(UpperCAmelCase_ ):
__lowerCamelCase : Union[str, Any] = None
for i in range(UpperCAmelCase_ ):
# load all TP files
__lowerCamelCase : Optional[Any] = file.replace('model_00' , F'model_0{i}' )
__lowerCamelCase : List[str] = torch.load(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
__lowerCamelCase : List[Any] = list(temp.keys() )
for key in keys:
__lowerCamelCase : int = temp.pop(UpperCAmelCase_ )
if tensors is None:
__lowerCamelCase : List[str] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__lowerCamelCase : Any = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__lowerCamelCase : int = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__lowerCamelCase : Union[str, Any] = tensors[key] / pretraining_tp
__lowerCamelCase : int = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__lowerCamelCase : str = set(other_keys.missing_keys )
else:
__lowerCamelCase : int = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
__lowerCamelCase : Optional[int] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__lowerCamelCase : List[str] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__lowerCamelCase : Dict = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
A__ : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 13 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
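# Small hand-checkable values for the recursion above: with 1 day all 3 single-day strings
# qualify, with 2 days only the double-absence string is excluded (8 of 9), and a 4-day
# period gives 43, the example value quoted in Project Euler problem 191.
if __name__ == "__main__":
    assert solution(1) == 3
    assert solution(2) == 8
    assert solution(4) == 43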
| 13 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
'''simple docstring'''
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase = f.readlines()
# Get to the point we do the actual imports for type checking
lowercase = 0
lowercase = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowercase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE__ ) and len(lines[line_index] ) > 1:
lowercase = lines[line_index]
lowercase = _re_single_line_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
lowercase = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
'''simple docstring'''
if backend_specific_objects is None:
lowercase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowercase = {}
for backend, objects in backend_specific_objects.items():
lowercase = """[""" + """, """.join(f'"{b}"' for b in backend.split('_and_' ) ) + """]"""
lowercase = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for o in objects] )
lowercase = dummy_file
return dummy_files
def check_dummies(overwrite=False):
'''simple docstring'''
lowercase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowercase = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowercase = os.path.join(SCREAMING_SNAKE_CASE__ , 'utils' )
lowercase = {
backend: os.path.join(SCREAMING_SNAKE_CASE__ , f'dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py' )
for backend in dummy_files.keys()
}
lowercase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase = f.read()
else:
lowercase = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 704 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a :
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase_ ( self ):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = inputs['prompt']
lowercase = inputs['generator']
lowercase = inputs['num_inference_steps']
lowercase = inputs['output_type']
if "image" in inputs:
lowercase = inputs['image']
else:
lowercase = None
if "mask_image" in inputs:
lowercase = inputs['mask_image']
else:
lowercase = None
if "original_image" in inputs:
lowercase = inputs['original_image']
else:
lowercase = None
lowercase , lowercase = pipe.encode_prompt(_lowerCamelCase )
# inputs with prompt converted to embeddings
lowercase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = inputs['generator']
lowercase = inputs['num_inference_steps']
lowercase = inputs['output_type']
# inputs with prompt converted to embeddings
lowercase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
lowercase = pipe_loaded(**_lowerCamelCase )[0]
lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
def UpperCamelCase_ ( self ):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase = self.get_dummy_inputs(_lowerCamelCase )
lowercase = pipe_loaded(**_lowerCamelCase )[0]
lowercase = np.abs(to_np(_lowerCamelCase ) - to_np(_lowerCamelCase ) ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
| 134 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__magic_name__ = True
except (ImportError, ModuleNotFoundError):
__magic_name__ = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def _lowerCAmelCase ( UpperCamelCase_ ):
    UpperCamelCase_ = re.sub("""<n>""" , """""" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 155 |
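# A minimal usage sketch of the sentence-splitting idea in the helper above: strip the
# Pegasus "<n>" marker, then put each sentence on its own line for ROUGE-Lsum scoring.
# The sample text is made up for illustration.
import nltk

nltk.download("punkt", quiet=True)
text = "Pegasus emits <n> as a newline marker. ROUGE-Lsum wants one sentence per line."
cleaned = text.replace("<n>", "")
print("\n".join(nltk.sent_tokenize(cleaned)))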
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 155 | 1 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : str = "▁" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[str, AddedToken] = "<unk>" , UpperCamelCase_ : Union[str, AddedToken] = "</s>" , UpperCamelCase_ : Union[str, AddedToken] = "<pad>" , ) -> Any:
'''simple docstring'''
_lowercase : Tuple = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
_lowercase : Tuple = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_lowercase : str = token_dict['token']
_lowercase : Optional[Any] = Tokenizer(Unigram() )
_lowercase : Dict = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
_lowercase : str = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCamelCase ),
pre_tokenizers.Punctuation(),
] )
_lowercase : int = decoders.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
_lowercase : int = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
_lowercase : Tuple = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : int = 8000 , UpperCamelCase_ : bool = True , ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_lowercase : str = [files]
self._tokenizer.train(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Union[Iterator[str], Iterator[Iterator[str]]] , UpperCamelCase_ : int = 8000 , UpperCamelCase_ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Tuple = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
self._tokenizer.train_from_iterator(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = json.loads(self._tokenizer.to_str() )
_lowercase : int = self.special_tokens['unk']['id']
_lowercase : Optional[int] = Tokenizer.from_str(json.dumps(__UpperCamelCase ) )
| 706 |
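# A hedged, self-contained sketch of training a Unigram tokenizer with the `tokenizers`
# library, the same building blocks the class above wires together. The whitespace
# pre-tokenizer, vocabulary size, and toy corpus are illustrative simplifications,
# not values taken from the class.
from tokenizers import Tokenizer, pre_tokenizers, trainers
from tokenizers.models import Unigram

tok = Tokenizer(Unigram())
tok.pre_tokenizer = pre_tokenizers.Whitespace()
trainer = trainers.UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
tok.train_from_iterator(["hello world", "hello unigram tokenizer"], trainer=trainer)
print(tok.encode("hello world").tokens)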
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
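# A small sanity check of the Base64 grouping logic above: regroup 8-bit bytes into
# 6-bit indices and compare the result with the standard library. Names here are
# illustrative and not taken from the file.
import base64

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
data = b"Man"

bits = "".join(bin(byte)[2:].zfill(8) for byte in data)                      # 24 bits
encoded = "".join(B64_CHARSET[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
assert encoded.encode() == base64.b64encode(data)                            # b"TWFu"
print(encoded)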
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_snake_case : Optional[Any] = False
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : int , lowerCamelCase : Optional[Any]=32 ) -> Dict:
set_seed(0 )
__snake_case : int = UNetaDModel(sample_size=lowerCamelCase , in_channels=3 , out_channels=3 )
__snake_case : List[Any] = torch.optim.SGD(model.parameters() , lr=0.00_01 )
return model, optimizer
@slow
def __snake_case ( self : str ) -> List[Any]:
__snake_case : int = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__snake_case : Optional[Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCamelCase , )
__snake_case : Tuple = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCamelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
__snake_case : Tuple = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCamelCase ) for _ in range(4 )]
__snake_case : str = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase ) for _ in range(4 )]
__snake_case : List[Any] = [torch.randint(0 , 1000 , (4,) ).long().to(lowerCamelCase ) for _ in range(4 )]
# train with a DDPM scheduler
__snake_case , __snake_case : str = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
__snake_case : Optional[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__snake_case : List[str] = model(lowerCamelCase , timesteps[i] ).sample
__snake_case : Tuple = torch.nn.functional.mse_loss(lowerCamelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__snake_case , __snake_case : Optional[int] = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
__snake_case : str = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__snake_case : Any = model(lowerCamelCase , timesteps[i] ).sample
__snake_case : Tuple = torch.nn.functional.mse_loss(lowerCamelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
| 81 |
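# A brief sketch of the shared forward (noising) process the equivalence test above
# relies on: DDPM and DDIM use the same q(x_t | x_0), so add_noise is interchangeable
# between them. The linear beta schedule mirrors the test's values; shapes are arbitrary.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def add_noise(x0, noise, timesteps):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    a = alphas_cumprod[timesteps].sqrt().view(-1, 1, 1, 1)
    b = (1.0 - alphas_cumprod[timesteps]).sqrt().view(-1, 1, 1, 1)
    return a * x0 + b * noise

x0 = torch.randn(4, 3, 32, 32)
eps = torch.randn_like(x0)
timesteps = torch.randint(0, 1000, (4,))
print(add_noise(x0, eps, timesteps).shape)  # torch.Size([4, 3, 32, 32])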
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__SCREAMING_SNAKE_CASE : str =0
__SCREAMING_SNAKE_CASE : int =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE : str =[[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__SCREAMING_SNAKE_CASE : Any =tuple[int, int]
class A_ :
def __init__( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Node | None , ):
lowercase = pos_x
lowercase = pos_y
lowercase = (pos_y, pos_x)
lowercase = goal_x
lowercase = goal_y
lowercase = g_cost
lowercase = parent
lowercase = self.calculate_heuristic()
lowercase = self.g_cost + self.h_cost
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowercase = self.pos_x - self.goal_x
lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case__ ) + abs(snake_case__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : List[Any] , snake_case__ : Node ):
return self.f_cost < other.f_cost
class A_ :
def __init__( self : Any , snake_case__ : TPosition , snake_case__ : TPosition ):
lowercase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case__ )
lowercase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , snake_case__ )
lowercase = [self.start]
lowercase = []
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case__ )
self.closed_nodes.append(snake_case__ )
lowercase = self.get_successors(snake_case__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case__ )
else:
# retrieve the best current path
lowercase = self.open_nodes.pop(self.open_nodes.index(snake_case__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case__ )
else:
self.open_nodes.append(snake_case__ )
return [self.start.pos]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Node ):
lowercase = []
for action in delta:
lowercase = parent.pos_x + action[1]
lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case__ , snake_case__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case__ , ) )
return successors
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Node | None ):
lowercase = node
lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase = current_node.parent
path.reverse()
return path
class A_ :
def __init__( self : Optional[int] , snake_case__ : TPosition , snake_case__ : TPosition ):
lowercase = AStar(snake_case__ , snake_case__ )
lowercase = AStar(snake_case__ , snake_case__ )
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowercase = self.fwd_astar.open_nodes.pop(0 )
lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case__ , snake_case__ )
self.fwd_astar.closed_nodes.append(snake_case__ )
self.bwd_astar.closed_nodes.append(snake_case__ )
lowercase = current_bwd_node
lowercase = current_fwd_node
lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case__ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case__ )
else:
# retrieve the best current path
lowercase = astar.open_nodes.pop(
astar.open_nodes.index(snake_case__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case__ )
else:
astar.open_nodes.append(snake_case__ )
return [self.fwd_astar.start.pos]
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Node , snake_case__ : Node ):
lowercase = self.fwd_astar.retrace_path(snake_case__ )
lowercase = self.bwd_astar.retrace_path(snake_case__ )
bwd_path.pop()
bwd_path.reverse()
lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__SCREAMING_SNAKE_CASE : str =(0, 0)
__SCREAMING_SNAKE_CASE : Union[str, Any] =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__SCREAMING_SNAKE_CASE : Any =time.time()
__SCREAMING_SNAKE_CASE : Optional[Any] =AStar(init, goal)
__SCREAMING_SNAKE_CASE : int =a_star.search()
__SCREAMING_SNAKE_CASE : Optional[int] =time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__SCREAMING_SNAKE_CASE : Optional[int] =time.time()
__SCREAMING_SNAKE_CASE : Dict =BidirectionalAStar(init, goal)
__SCREAMING_SNAKE_CASE : Tuple =time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 428 | 0 |
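# A compact reference A* on the same kind of (y, x) grid as above, using heapq instead
# of re-sorting the open list each iteration. This is a sketch for comparison, not the
# class-based implementation from the file; the demo grid is made up.
import heapq

def astar(grid, start, goal):
    def h(p):  # Manhattan heuristic
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

    open_heap = [(h(start), 0, start, None)]
    came_from, g_cost = {}, {start: 0}
    while open_heap:
        _, g, pos, parent = heapq.heappop(open_heap)
        if pos in came_from:
            continue
        came_from[pos] = parent
        if pos == goal:
            path = []
            while pos is not None:
                path.append(pos)
                pos = came_from[pos]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = pos[0] + dy, pos[1] + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0:
                ng = g + 1
                if ng < g_cost.get((ny, nx), float("inf")):
                    g_cost[(ny, nx)] = ng
                    heapq.heappush(open_heap, (ng + h((ny, nx)), ng, (ny, nx), pos))
    return [start]

demo_grid = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
print(astar(demo_grid, (0, 0), (2, 0)))  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]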
import argparse
import struct
import unittest
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[str]:
_A = data
# Initialize hash values
_A = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
_A = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
_A = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ ) -> Any:
_A = b"""\x80""" + (b"""\x00""" * (63 - (len(A__ ) + 8) % 64))
_A = struct.pack(""">Q""" , (len(A__ ) * 8) )
return data + padding + big_endian_integer
def UpperCAmelCase ( self ) -> Optional[Any]:
# Convert into blocks of 64 bytes
_A = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_A = list(struct.unpack(""">16L""" , A__ ) )
# add 48 0-ed integers
words += [0] * 48
_A = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_A = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
_A = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
_A = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100_000_000
# Compression
_A = self.ror(A__ , 6 ) ^ self.ror(A__ , 11 ) ^ self.ror(A__ , 25 )
_A = (e & f) ^ ((~e & 0xff_fff_fff) & g)
_A = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
_A = self.ror(A__ , 2 ) ^ self.ror(A__ , 13 ) ^ self.ror(A__ , 22 )
_A = (a & b) ^ (a & c) ^ (b & c)
_A = (sa + maj) % 0x100_000_000
_A = (
g,
f,
e,
((d + tempa) % 0x100_000_000),
c,
b,
a,
((tempa + tempa) % 0x100_000_000),
)
_A = [a, b, c, d, e, f, g, h]
# Modify final values
_A = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
_A = """""".join([hex(A__ )[2:].zfill(8 ) for value in self.hashes] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
return 0xff_fff_fff & (value << (32 - rotations)) | (value >> rotations)
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
import hashlib
_A = bytes("""Test String""" , """utf-8""" )
self.assertEqual(SHAaaa(A__ ).hash , hashlib.shaaaa(A__ ).hexdigest() )
def snake_case ( ) -> None:
import doctest
doctest.testmod()
_A = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""")
_A = parser.parse_args()
_A = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""") as f:
_A = f.read()
else:
_A = bytes(lowercase_ , """utf-8""")
print(SHAaaa(lowercase_).hash)
if __name__ == "__main__":
    snake_case()
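# A hedged side sketch (not part of the file above): the 32-bit right-rotation the
# compression loop uses, plus a hashlib reference digest for cross-checking any
# hand-rolled SHA-256. The sample word and message are arbitrary.
import hashlib

def rotr32(value: int, rotations: int) -> int:
    rotations %= 32
    return ((value >> rotations) | (value << (32 - rotations))) & 0xFFFFFFFF

word = 0x6A09E667
sigma0 = rotr32(word, 7) ^ rotr32(word, 18) ^ (word >> 3)  # message-schedule sigma0
print(hex(sigma0))
print(hashlib.sha256(b"Test String").hexdigest())          # reference digest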
| 720 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def snake_case ( snake_case__ :Tuple) -> str:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
_A = char
_A = set(snake_case__)
return pairs
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
_A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ )
_A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ )
_A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ )
if "\n" in token:
_A = token.replace("""\n""" , """ __newln__""" )
_A = token.split(""" """ )
_A = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A = token.lower()
_A = tuple(lowerCAmelCase_ )
_A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_A = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(lowerCAmelCase_ ):
try:
_A = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(lowerCAmelCase_ )
_A = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A = get_pairs(lowerCAmelCase_ )
_A = """@@ """.join(lowerCAmelCase_ )
_A = word[:-4]
_A = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_A = []
_A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
_A = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" )
_A = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A = token_index
writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
| 83 | 0 |
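# A self-contained sketch of the greedy BPE merge loop the tokenizer above applies to
# each word: repeatedly merge the lowest-ranked adjacent pair until no known merge is
# left. The toy merge table is made up for illustration.
def bpe_merge(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {pair: i for i, pair in enumerate(merges)}
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

toy_merges = [("l", "o"), ("lo", "w"), ("e", "r</w>"), ("low", "er</w>")]
print(bpe_merge("lower", toy_merges))  # ['lower</w>']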
'''simple docstring'''
def lowercase__ ( __lowercase : Union[str, Any] ) -> list[list[int]]:
"""simple docstring"""
__UpperCamelCase = []
if len(A__ ) == 1:
return [nums.copy()]
for _ in range(len(A__ ) ):
__UpperCamelCase = nums.pop(0 )
__UpperCamelCase = permute(A__ )
for perm in permutations:
perm.append(A__ )
result.extend(A__ )
nums.append(A__ )
return result
def lowercase__ ( __lowercase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
def backtrack(__lowercase : Tuple ):
if start == len(A__ ) - 1:
output.append(nums[:] )
else:
for i in range(A__ , len(A__ ) ):
__UpperCamelCase = nums[i], nums[start]
backtrack(start + 1 )
__UpperCamelCase = nums[i], nums[start] # backtrack
__UpperCamelCase = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
a__ : Union[str, Any] =permutea([1, 2, 3])
print(res)
doctest.testmod()
| 399 |
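# A quick, hedged cross-check of the backtracking idea above against itertools; useful
# because swap-based permutation code is easy to get subtly wrong. Names are illustrative.
from itertools import permutations

def permute_backtracking(nums):
    out = []

    def backtrack(start):
        if start == len(nums) - 1:
            out.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[start], nums[i] = nums[i], nums[start]
            backtrack(start + 1)
            nums[start], nums[i] = nums[i], nums[start]  # undo the swap

    backtrack(0)
    return out

result = permute_backtracking([1, 2, 3])
assert sorted(map(tuple, result)) == sorted(permutations([1, 2, 3]))
print(result)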
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a__ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowercase , cache_dir=lowercase)
a__ : Any = [t[-1] for t in os.walk(os.path.join(lowercase , os.listdir(lowercase)[0] , 'snapshots'))]
a__ : str = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowercase)
a__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : Tuple = jax.random.PRNGKey(0)
a__ : str = 4
a__ : Dict = jax.device_count()
a__ : List[Any] = num_samples * [prompt]
a__ : Any = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : str = replicate(lowercase)
a__ : Dict = jax.random.split(lowercase , lowercase)
a__ : Dict = shard(lowercase)
a__ : List[str] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1e-3
assert np.abs(np.abs(lowercase , dtype=np.floataa).sum() - 4_99_47.8_75) < 5e-1
a__ : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(lowercase) == num_samples
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : int = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=lowercase)
a__ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : List[str] = jax.random.PRNGKey(0)
a__ : Any = 50
a__ : Tuple = jax.device_count()
a__ : Optional[int] = num_samples * [prompt]
a__ : Optional[int] = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : List[Any] = replicate(lowercase)
a__ : int = jax.random.split(lowercase , lowercase)
a__ : Optional[int] = shard(lowercase)
a__ : str = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5e-1
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ , a__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowercase)
a__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : Optional[int] = jax.random.PRNGKey(0)
a__ : Dict = 50
a__ : List[Any] = jax.device_count()
a__ : Dict = num_samples * [prompt]
a__ : Any = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : Optional[Any] = replicate(lowercase)
a__ : List[Any] = jax.random.split(lowercase , lowercase)
a__ : Optional[Any] = shard(lowercase)
a__ : Optional[Any] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5e-1
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
a__ : Optional[int] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : List[Any] = jax.random.PRNGKey(0)
a__ : List[Any] = 50
a__ : int = jax.device_count()
a__ : Tuple = num_samples * [prompt]
a__ : Dict = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : int = replicate(lowercase)
a__ : List[str] = jax.random.split(lowercase , lowercase)
a__ : Optional[Any] = shard(lowercase)
a__ : Tuple = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5e-1
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Any = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=lowercase , steps_offset=1 , )
a__ , a__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=lowercase , safety_checker=lowercase , )
a__ : str = scheduler.create_state()
a__ : List[str] = scheduler_state
a__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : List[str] = jax.random.PRNGKey(0)
a__ : List[Any] = 50
a__ : Tuple = jax.device_count()
a__ : List[Any] = num_samples * [prompt]
a__ : List[Any] = pipeline.prepare_inputs(lowercase)
# shard inputs and rng
a__ : List[Any] = replicate(lowercase)
a__ : Any = jax.random.split(lowercase , lowercase)
a__ : Optional[int] = shard(lowercase)
a__ : Optional[int] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1e-3
assert np.abs((np.abs(lowercase , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5e-1
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
a__ : Optional[Any] = jax.device_count()
a__ : List[str] = num_samples * [prompt]
a__ : List[str] = jax.random.split(jax.random.PRNGKey(0) , lowercase)
a__ , a__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowercase , )
a__ : List[str] = replicate(lowercase)
a__ : int = pipeline.prepare_inputs(lowercase)
a__ : Dict = shard(lowercase)
a__ : Tuple = pipeline(lowercase , lowercase , lowercase , jit=lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
a__ : Tuple = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
a__ , a__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowercase , use_memory_efficient_attention=lowercase , )
a__ : int = replicate(lowercase)
a__ : str = pipeline.prepare_inputs(lowercase)
a__ : Dict = shard(lowercase)
a__ : int = pipeline(lowercase , lowercase , lowercase , jit=lowercase).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
a__ : Dict = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1e-2
| 302 | 0 |
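# A small sketch of what the `shard` helper used above does to a batch: it folds the
# leading batch axis into (num_devices, per_device_batch, ...) so each pmapped device
# sees one slice. Pure numpy here; the shapes are illustrative, not from the tests.
import numpy as np

def shard_like(batch, num_devices):
    assert batch.shape[0] % num_devices == 0
    return batch.reshape(num_devices, batch.shape[0] // num_devices, *batch.shape[1:])

prompt_ids = np.zeros((8, 77), dtype=np.int32)       # e.g. 8 tokenized prompts
print(shard_like(prompt_ids, num_devices=4).shape)   # (4, 2, 77)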
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ = "x" , lowercase__ = 10**-10 , lowercase__ = 1 , ) -> complex:
__lowercase = symbols(lowercase__ )
__lowercase = lambdify(lowercase__ , lowercase__ )
__lowercase = lambdify(lowercase__ , diff(lowercase__ , lowercase__ ) )
__lowercase = starting_point
while True:
if diff_function(lowercase__ ) != 0:
__lowercase = prev_guess - multiplicity * func(lowercase__ ) / diff_function(
lowercase__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__lowercase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 701 |
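# A dependency-free sketch of the same Newton-Raphson update the sympy version above
# implements: x_{n+1} = x_n - f(x_n) / f'(x_n). Tolerance and starting point are
# arbitrary illustrative choices.
def newton_raphson_plain(f, df, x0, tol=1e-10, max_iter=100):
    x = x0
    for _ in range(max_iter):
        dfx = df(x)
        if dfx == 0:
            raise ZeroDivisionError("derivative vanished; pick another starting point")
        x_next = x - f(x) / dfx
        if abs(x_next - x) < tol:
            return x_next
        x = x_next
    return x

# Root of x**2 - 2, i.e. sqrt(2)
print(newton_raphson_plain(lambda x: x * x - 2, lambda x: 2 * x, 1.5))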
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Tuple = LxmertTokenizer
lowercase__ : List[str] = LxmertTokenizerFast
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
def snake_case__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , lowercase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = tokenizer.tokenize(lowercase )
__lowercase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__lowercase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase )
__lowercase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
| 634 | 0 |
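# A compact sketch of the greedy longest-match WordPiece step the tokenizer under test
# performs, using the same toy vocabulary as the test above; the helper name is
# illustrative, not the library's API.
def wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = word[start:end] if start == 0 else "##" + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]
        tokens.append(piece)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "want", "wa", "runn", "##ing", ",", "low", "lowest"}
print(wordpiece("unwanted", vocab))  # ['un', '##want', '##ed']
print(wordpiece("running", vocab))   # ['runn', '##ing']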
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
a_ :Optional[int] = logging.getLogger()
def a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
return args.f
def a ( A__ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(A__ , '''all_results.json''' )
if os.path.exists(A__ ):
with open(A__ , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : List[str] = json.load(A__ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
a_ :Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( _UpperCAmelCase ):
@classmethod
def lowercase__ ( cls : int ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE__ : Any = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowercase__ ( cls : Any ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : str = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : int = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : List[Any] = get_results(_lowercase )
self.assertLess(result['''perplexity'''] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_results(_lowercase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
SCREAMING_SNAKE_CASE__ : str = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE__ : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : List[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Any = get_results(_lowercase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Any = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Tuple = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : List[Any] = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : str = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Any = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''translation_no_trainer''' ) ) )
@slow
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : int = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : List[str] = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_results(_lowercase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''image_classification_no_trainer''' ) ) )
| 35 |
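# A hedged sketch of the pattern the tests above use: compose an "accelerate launch"
# command around an example script and run it as a subprocess. The config path, script
# path, and arguments below are placeholders, not values from the test file.
import subprocess

launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
script_args = [
    "examples/pytorch/text-classification/run_glue_no_trainer.py",
    "--model_name_or_path", "distilbert-base-uncased",
    "--output_dir", "/tmp/glue_no_trainer",
]
print(" ".join(launch_args + script_args))
# subprocess.run(launch_args + script_args, check=True)  # uncomment to actually launch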
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__SCREAMING_SNAKE_CASE : str = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
__SCREAMING_SNAKE_CASE : str = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : str = PRETRAINED_INIT_CONFIGURATION
lowercase__ : int = ['input_ids', 'attention_mask']
lowercase__ : Tuple = DistilBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase__ ) != tokenize_chinese_chars
):
_lowerCamelCase = getattr(lowerCamelCase__ , normalizer_state.pop('''type''' ) )
_lowerCamelCase = do_lower_case
_lowerCamelCase = strip_accents
_lowerCamelCase = tokenize_chinese_chars
_lowerCamelCase = normalizer_class(**lowerCamelCase__ )
_lowerCamelCase = do_lower_case
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 661 | 0 |
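# Illustrative only: how the pair layout built above composes two sequences. The token
# ids are hypothetical placeholders, not real vocabulary entries.
cls_id, sep_id = 101, 102
seq_a = [7592, 2088]          # hypothetical word-piece ids for sequence A
seq_b = [2129, 2024]          # hypothetical word-piece ids for sequence B

input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
assert len(input_ids) == len(token_type_ids)
print(input_ids)
print(token_type_ids)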
from __future__ import annotations
from fractions import Fraction
def _lowerCamelCase ( _a , _a ):
"""simple docstring"""
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = []
_lowerCamelCase = 1_1
_lowerCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
_lowerCamelCase = 1_0
return solutions
def _lowerCamelCase ( _a = 2 ):
"""simple docstring"""
_lowerCamelCase = 1.0
for fraction in fraction_list(_lowerCamelCase ):
_lowerCamelCase = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
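# For readability, here is an un-mangled sketch of the same computation (Project Euler 33,
# "digit-cancelling fractions"); the identifier names are the editor's, and exact Fraction
# comparison is used instead of float equality, but the logic mirrors the functions above.
from fractions import Fraction

def is_digit_cancelling_sketch(num: int, den: int) -> bool:
    # e.g. 49/98 "cancels" the shared digit 9 to give 4/8, which equals 49/98
    return num != den and num % 10 == den // 10 and Fraction(num // 10, den % 10) == Fraction(num, den)

def solution_sketch() -> int:
    product = Fraction(1, 1)
    for num in range(10, 100):
        for den in range(num + 1, 100):           # keep the fraction below 1
            if den % 10 != 0 and is_digit_cancelling_sketch(num, den):
                product *= Fraction(num, den)
    return product.denominator                    # the product reduces to 1/100

assert solution_sketch() == 100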
| 704 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a__ , a__=2 , a__=56 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=2 , a__=7 , a__="gelu_new" , a__=0.1 , a__=0.1 , a__=5_12 , a__=16 , a__=2 , a__=0.02 , a__=4 , a__="block_sparse" , a__=True , a__=False , a__=2 , a__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def _UpperCAmelCase ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class __magic_name__ ( lowercase_ ,unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
_UpperCamelCase = False
_UpperCamelCase = False
def _UpperCAmelCase ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
super().test_hidden_states_output()
@slow
def _UpperCAmelCase ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(a__ )
def _UpperCAmelCase ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(a__ , a__ )
_lowerCamelCase = model_class(a__ )
@jax.jit
def model_jitted(a__ , a__=None , **a__ ):
return model(input_ids=a__ , attention_mask=a__ , **a__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**a__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__=1E-5 , a__="outputs" , a__=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch version
        # makes an effort to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(a__ , a__ , a__ , a__ , a__ , a__ )
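# The jit test above follows a common JAX pattern: run the same forward function once compiled
# with `jax.jit` and once eagerly under `jax.disable_jit()`, then compare the output shapes. A
# self-contained illustration of that pattern with a toy function instead of a transformer:
import jax
import jax.numpy as jnp

@jax.jit
def toy_forward(x):
    return jnp.tanh(x) @ x.T

toy_input = jnp.ones((2, 4))
jitted_out = toy_forward(toy_input)
with jax.disable_jit():
    eager_out = toy_forward(toy_input)
assert jitted_out.shape == eager_out.shape == (2, 2)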
| 297 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''DPTFeatureExtractor''']
__lowerCAmelCase = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
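# The file above only declares an import structure; the heavy submodules are loaded lazily on
# first attribute access. A rough, self-contained illustration of that mechanism (an editor's
# sketch of the general pattern, not the actual `_LazyModule` implementation):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)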
| 358 |
"""simple docstring"""
import argparse
import os
import re
lowerCamelCase_ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase_ = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowerCamelCase_ = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def __lowerCamelCase ( a_ : Optional[Any] , a_ : bool = False ) -> Tuple:
with open(a_ , '''r''' , encoding='''utf-8''' ) as f:
__SCREAMING_SNAKE_CASE :Union[str, Any] = f.read()
__SCREAMING_SNAKE_CASE :Dict = content.split('''\n''' )
__SCREAMING_SNAKE_CASE :List[Any] = []
__SCREAMING_SNAKE_CASE :Optional[int] = 0
while line_idx < len(a_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__SCREAMING_SNAKE_CASE :str = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__SCREAMING_SNAKE_CASE :Dict = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__SCREAMING_SNAKE_CASE :List[str] = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__SCREAMING_SNAKE_CASE :Optional[Any] = sorted(a_ , key=lambda a_ : _re_identifier.search(a_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(a_ ) )
elif "\n".join(a_ ) != content:
return True
def __lowerCamelCase ( a_ : bool = False ) -> int:
__SCREAMING_SNAKE_CASE :str = [os.path.join(a_ , a_ ) for f in os.listdir(a_ ) if f.endswith('''.py''' )]
__SCREAMING_SNAKE_CASE :List[str] = [sort_auto_mapping(a_ , overwrite=a_ ) for fname in fnames]
if not overwrite and any(a_ ):
__SCREAMING_SNAKE_CASE :str = [f for f, d in zip(a_ , a_ ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(a_ )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCamelCase_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only) | 498 | 0 |
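# The core step of the script above is: collect every entry of a `*_MAPPING = OrderedDict([...])`
# block and re-emit the entries sorted by the quoted model identifier. A small standalone
# illustration of that sorting step (the sample lines are made up for the example):
import re

_re_identifier_sketch = re.compile(r'\s*\(\s*"(\S[^"]+)"')
sample_blocks = [
    '        ("roberta", "RobertaModel"),',
    '        ("albert", "AlbertModel"),',
    '        ("bert", "BertModel"),',
]
sorted_blocks = sorted(sample_blocks, key=lambda line: _re_identifier_sketch.search(line).groups()[0])
assert [_re_identifier_sketch.search(b).groups()[0] for b in sorted_blocks] == ["albert", "bert", "roberta"]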
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 179 | '''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=99 , lowercase__=64 , lowercase__=5 , lowercase__=4 , lowercase__=64 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> int:
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def _UpperCamelCase ( self ) -> Union[str, Any]:
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ) -> Tuple:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = MPNetModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = MPNetForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(
lowercase__ , attention_mask=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MPNetForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE : Any = MPNetForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
lowercase__ , attention_mask=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : str = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Optional[int] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : int = True
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : int = MPNetModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Tuple = MPNetModel.from_pretrained('microsoft/mpnet-base' )
SCREAMING_SNAKE_CASE : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase__ )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase__ )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
| 179 | 1 |
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
    # Build a list of the profit gained per 1 kg of each item, i.e. compute and
    # append profit/weight for every element.
snake_case_ = [p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
snake_case_ = sorted(lowercase__ )
# declaring useful variables
snake_case_ = len(lowercase__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
    # loop until the accumulated weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
        # largest remaining profit/weight ratio in sorted_profit_by_weight
snake_case_ = sorted_profit_by_weight[length - i - 1]
snake_case_ = profit_by_weight.index(lowercase__ )
snake_case_ = -1
        # check whether this item's full weight still fits within the
        # remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # add the full profit for this item, since the fraction taken is
            # weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fully fit, so take only the remaining capacity in kg
            # and add the proportional profit:
            # (remaining weight / weight[index]) * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
A = [int(x) for x in input('Input profits separated by spaces: ').split()]
A = [int(x) for x in input('Input weights separated by spaces: ').split()]
A = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
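# A readable sketch of the same greedy fractional-knapsack idea (the names are the editor's;
# the function above is the mangled original). Items are taken in decreasing profit-per-kg
# order, and the first item that does not fully fit is taken fractionally.
def fractional_knapsack_sketch(profit, weight, capacity):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain = 0.0
    for p, w in items:
        if capacity >= w:               # the whole item fits
            capacity -= w
            gain += p
        else:                           # take only the fraction that fits, then stop
            gain += p * (capacity / w)
            break
    return gain

assert fractional_knapsack_sketch([1, 2, 3], [3, 4, 5], 15) == 6    # everything fits
assert fractional_knapsack_sketch([10, 9, 8], [5, 6, 7], 5) == 10   # only the best item fits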
| 187 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> List[str]:
"""simple docstring"""
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
def __UpperCAmelCase ( UpperCAmelCase )-> List[str]:
"""simple docstring"""
lowercase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
lowercase = value
else:
lowercase = value
return new_state_dict
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase=False )-> Tuple:
"""simple docstring"""
lowercase = ''''''
if is_panoptic:
lowercase = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
lowercase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase = in_proj_weight[:256, :]
lowercase = in_proj_bias[:256]
lowercase = in_proj_weight[256:512, :]
lowercase = in_proj_bias[256:512]
lowercase = in_proj_weight[-256:, :]
lowercase = in_proj_bias[-256:]
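# What the slicing above does: PyTorch's fused attention projection stores the query, key and
# value weights stacked into a single (3*d, d) matrix (plus a 3*d bias), and the converter splits
# it into the three separate projections the Hugging Face model expects. Toy check with d = 256:
import torch

d_model = 256
fused_in_proj = torch.randn(3 * d_model, d_model)
q_w, k_w, v_w = fused_in_proj[:d_model], fused_in_proj[d_model : 2 * d_model], fused_in_proj[-d_model:]
assert q_w.shape == k_w.shape == v_w.shape == (d_model, d_model)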
def __UpperCAmelCase ( )-> Any:
"""simple docstring"""
lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase = Image.open(requests.get(UpperCAmelCase, stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase )-> Tuple:
"""simple docstring"""
lowercase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowercase = '''resnet101'''
if "dc5" in model_name:
lowercase = True
lowercase = '''panoptic''' in model_name
if is_panoptic:
lowercase = 250
else:
lowercase = 91
lowercase = '''huggingface/label-files'''
lowercase = '''coco-detection-id2label.json'''
lowercase = json.load(open(hf_hub_download(UpperCAmelCase, UpperCAmelCase, repo_type='''dataset''' ), '''r''' ) )
lowercase = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
# load image processor
lowercase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
lowercase = ConditionalDetrImageProcessor(format=UpperCAmelCase )
# prepare image
lowercase = prepare_img()
lowercase = image_processor(images=UpperCAmelCase, return_tensors='''pt''' )
lowercase = encoding['''pixel_values''']
logger.info(f'Converting model {model_name}...' )
# load original model from torch hub
lowercase = torch.hub.load('''DeppMeng/ConditionalDETR''', UpperCAmelCase, pretrained=UpperCAmelCase ).eval()
lowercase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowercase = '''conditional_detr.''' + src
rename_key(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
lowercase = rename_backbone_keys(UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase, is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
# finally, create HuggingFace model and load state dict
lowercase = ConditionalDetrForSegmentation(UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
model.push_to_hub(repo_id=UpperCAmelCase, organization='''DepuMeng''', commit_message='''Add model''' )
# verify our conversion
lowercase = conditional_detr(UpperCAmelCase )
lowercase = model(UpperCAmelCase )
assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4 )
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 604 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = 1
_lowerCAmelCase:Union[str, Any] = 3
_lowerCAmelCase:Union[str, Any] = (32, 32)
_lowerCAmelCase:int = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0)).to(a__)
return image
@property
def __UpperCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
torch.manual_seed(0)
_lowerCAmelCase:int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,)
return model
@property
def __UpperCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowerCAmelCase:Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def __UpperCamelCase ( self : str) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
_lowerCAmelCase:Optional[Any] = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5006 ,)
return RobertaSeriesModelWithTransformation(a__)
@property
def __UpperCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
def extract(*a__ : List[str] ,**a__ : List[Any]):
class a__ :
def __init__( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Any = torch.ones([0])
def __UpperCamelCase ( self : List[Any] ,a__ : str) -> List[Any]:
"""simple docstring"""
self.pixel_values.to(a__)
return self
return Out()
return extract
def __UpperCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase:Union[str, Any] = self.dummy_cond_unet
_lowerCAmelCase:Any = PNDMScheduler(skip_prk_steps=a__)
_lowerCAmelCase:Optional[int] = self.dummy_vae
_lowerCAmelCase:List[str] = self.dummy_text_encoder
_lowerCAmelCase:List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
_lowerCAmelCase:Tuple = 77
_lowerCAmelCase:List[Any] = self.dummy_image.to(a__)
_lowerCAmelCase:Any = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowerCAmelCase:Tuple = AltDiffusionImgaImgPipeline(
unet=a__ ,scheduler=a__ ,vae=a__ ,text_encoder=a__ ,tokenizer=a__ ,safety_checker=a__ ,feature_extractor=self.dummy_extractor ,)
_lowerCAmelCase:str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=a__)
_lowerCAmelCase:Union[str, Any] = alt_pipe.to(a__)
alt_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:str = '''A painting of a squirrel eating a burger'''
_lowerCAmelCase:int = torch.Generator(device=a__).manual_seed(0)
_lowerCAmelCase:str = alt_pipe(
[prompt] ,generator=a__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=a__ ,)
_lowerCAmelCase:Dict = output.images
_lowerCAmelCase:Dict = torch.Generator(device=a__).manual_seed(0)
_lowerCAmelCase:Optional[int] = alt_pipe(
[prompt] ,generator=a__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=a__ ,return_dict=a__ ,)[0]
_lowerCAmelCase:Any = image[0, -3:, -3:, -1]
_lowerCAmelCase:Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase:Union[str, Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''')
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase:Dict = self.dummy_cond_unet
_lowerCAmelCase:List[Any] = PNDMScheduler(skip_prk_steps=a__)
_lowerCAmelCase:Dict = self.dummy_vae
_lowerCAmelCase:Tuple = self.dummy_text_encoder
_lowerCAmelCase:Union[str, Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
_lowerCAmelCase:List[Any] = 77
_lowerCAmelCase:Any = self.dummy_image.to(a__)
# put models in fp16
_lowerCAmelCase:List[Any] = unet.half()
_lowerCAmelCase:List[str] = vae.half()
_lowerCAmelCase:List[str] = bert.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase:List[str] = AltDiffusionImgaImgPipeline(
unet=a__ ,scheduler=a__ ,vae=a__ ,text_encoder=a__ ,tokenizer=a__ ,safety_checker=a__ ,feature_extractor=self.dummy_extractor ,)
_lowerCAmelCase:Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=a__)
_lowerCAmelCase:Any = alt_pipe.to(a__)
alt_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:List[str] = '''A painting of a squirrel eating a burger'''
_lowerCAmelCase:str = torch.manual_seed(0)
_lowerCAmelCase:str = alt_pipe(
[prompt] ,generator=a__ ,num_inference_steps=2 ,output_type='''np''' ,image=a__ ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''')
def __UpperCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
_lowerCAmelCase:List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
        # resize to a resolution that is divisible by 8 but not by 16 or 32
_lowerCAmelCase:Dict = init_image.resize((760, 504))
_lowerCAmelCase:List[Any] = '''BAAI/AltDiffusion'''
_lowerCAmelCase:List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
a__ ,safety_checker=a__ ,)
pipe.to(a__)
pipe.set_progress_bar_config(disable=a__)
pipe.enable_attention_slicing()
_lowerCAmelCase:Optional[int] = '''A fantasy landscape, trending on artstation'''
_lowerCAmelCase:int = torch.manual_seed(0)
_lowerCAmelCase:List[str] = pipe(
prompt=a__ ,image=a__ ,strength=0.75 ,guidance_scale=7.5 ,generator=a__ ,output_type='''np''' ,)
_lowerCAmelCase:List[str] = output.images[0]
_lowerCAmelCase:Union[str, Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_lowerCAmelCase:Tuple = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
_lowerCAmelCase:Tuple = init_image.resize((768, 512))
_lowerCAmelCase:int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''')
_lowerCAmelCase:Optional[int] = '''BAAI/AltDiffusion'''
_lowerCAmelCase:Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
a__ ,safety_checker=a__ ,)
pipe.to(a__)
pipe.set_progress_bar_config(disable=a__)
pipe.enable_attention_slicing()
_lowerCAmelCase:Tuple = '''A fantasy landscape, trending on artstation'''
_lowerCAmelCase:List[Any] = torch.manual_seed(0)
_lowerCAmelCase:Any = pipe(
prompt=a__ ,image=a__ ,strength=0.75 ,guidance_scale=7.5 ,generator=a__ ,output_type='''np''' ,)
_lowerCAmelCase:List[Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
| 439 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class a__ ( UpperCamelCase_ ):
def __init__( self : int ,*a__ : Optional[Any] ,**a__ : Union[str, Any]) -> Tuple:
"""simple docstring"""
super().__init__(*a__ ,**a__)
requires_backends(self ,'''vision''')
self.check_model_type(a__)
def __call__( self : str ,a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**a__ : List[str]) -> Optional[int]:
"""simple docstring"""
return super().__call__(a__ ,**a__)
def __UpperCamelCase ( self : Union[str, Any] ,**a__ : List[Any]) -> Any:
"""simple docstring"""
return {}, {}, {}
def __UpperCamelCase ( self : Tuple ,a__ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:List[str] = load_image(a__)
_lowerCAmelCase:int = image.size
_lowerCAmelCase:int = self.image_processor(images=a__ ,return_tensors=self.framework)
return model_inputs
def __UpperCamelCase ( self : Dict ,a__ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Any = self.model(**a__)
return model_outputs
def __UpperCamelCase ( self : List[Any] ,a__ : Dict) -> Any:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = model_outputs.predicted_depth
_lowerCAmelCase:Union[str, Any] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=a__)
_lowerCAmelCase:List[str] = prediction.squeeze().cpu().numpy()
_lowerCAmelCase:Any = (output * 255 / np.max(a__)).astype('''uint8''')
_lowerCAmelCase:Dict = Image.fromarray(a__)
_lowerCAmelCase:Tuple = {}
_lowerCAmelCase:Optional[int] = predicted_depth
_lowerCAmelCase:str = depth
return output_dict
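# Typical end-user usage of this pipeline goes through the `pipeline` factory. The checkpoint id
# below is just one example of a depth-estimation model on the Hub; any compatible model id works.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")           # the PIL image built in postprocess() above
print(result["predicted_depth"].shape)      # the raw depth tensor returned alongside it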
| 439 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase_ = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase ( a_="no" , a_ = default_json_config_file , a_ = False ) -> Tuple:
lowerCAmelCase_ = Path(a_ )
path.parent.mkdir(parents=a_ , exist_ok=a_ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowerCAmelCase_ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowerCAmelCase_ = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase_ = torch.cuda.device_count()
lowerCAmelCase_ = num_gpus
lowerCAmelCase_ = False
if num_gpus > 1:
lowerCAmelCase_ = 'MULTI_GPU'
else:
lowerCAmelCase_ = 'NO'
elif is_xpu_available() and use_xpu:
lowerCAmelCase_ = torch.xpu.device_count()
lowerCAmelCase_ = num_xpus
lowerCAmelCase_ = False
if num_xpus > 1:
lowerCAmelCase_ = 'MULTI_XPU'
else:
lowerCAmelCase_ = 'NO'
elif is_npu_available():
lowerCAmelCase_ = torch.npu.device_count()
lowerCAmelCase_ = num_npus
lowerCAmelCase_ = False
if num_npus > 1:
lowerCAmelCase_ = 'MULTI_NPU'
else:
lowerCAmelCase_ = 'NO'
else:
lowerCAmelCase_ = 0
lowerCAmelCase_ = True
lowerCAmelCase_ = 1
lowerCAmelCase_ = 'NO'
lowerCAmelCase_ = ClusterConfig(**a_ )
config.to_json_file(a_ )
return path
def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]:
lowerCAmelCase_ = parser.add_parser('default' , parents=a_ , help=a_ , formatter_class=a_ )
parser.add_argument(
'--config_file' , default=a_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=a_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=a_ )
return parser
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
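# Usage sketch: the first helper above is `write_basic_config` (its un-mangled name is visible in
# the call inside the last function), and it is also exposed as a public utility, so a default
# single-machine config can be written programmatically, e.g. from a notebook:
from accelerate.utils import write_basic_config

write_basic_config(mixed_precision="bf16")  # writes a minimal config file under the Accelerate cache dir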
| 318 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowerCamelCase ( a_=None ) -> List[str]:
if subparsers is not None:
lowerCAmelCase_ = subparsers.add_parser('test' )
else:
lowerCAmelCase_ = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=a_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
lowerCAmelCase_ = script_name
else:
lowerCAmelCase_ = F'''--config_file={args.config_file} {script_name}'''
lowerCAmelCase_ = ['accelerate-launch'] + test_args.split()
lowerCAmelCase_ = execute_subprocess_async(a_ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def lowerCamelCase ( ) -> Optional[Any]:
lowerCAmelCase_ = test_command_parser()
lowerCAmelCase_ = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
| 318 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__SCREAMING_SNAKE_CASE =""" def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
SCREAMING_SNAKE_CASE_ = self.transformer_dir
shutil.copy(
os.path.join(_lowerCamelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def _A ( self: Dict , _lowerCamelCase: Optional[int] , _lowerCamelCase: Optional[int] , _lowerCamelCase: str , _lowerCamelCase: str=None ):
SCREAMING_SNAKE_CASE_ = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE_ = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
SCREAMING_SNAKE_CASE_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
SCREAMING_SNAKE_CASE_ = black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(_lowerCamelCase , '''w''' , newline='''\n''' ) as f:
f.write(_lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowerCamelCase )
with open(_lowerCamelCase , '''r''' ) as f:
self.assertTrue(f.read() , _lowerCamelCase )
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _A ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , _lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , _lowerCamelCase ) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE_ = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , _lowerCamelCase , _lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , _lowerCamelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , _lowerCamelCase ) , )
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
SCREAMING_SNAKE_CASE_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
SCREAMING_SNAKE_CASE_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
SCREAMING_SNAKE_CASE_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] )
self.assertFalse(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
SCREAMING_SNAKE_CASE_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
SCREAMING_SNAKE_CASE_ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 718 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ShapEImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Dict = ["image"]
SCREAMING_SNAKE_CASE__ : List[Any] = ["image"]
SCREAMING_SNAKE_CASE__ : List[Any] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE__ : Optional[int] = False
@property
def _A ( self: Optional[Any] ):
return 32
@property
def _A ( self: Optional[int] ):
return 32
@property
def _A ( self: List[Any] ):
return self.time_input_dim * 4
@property
def _A ( self: Any ):
return 8
@property
def _A ( self: int ):
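        # Deliberately tiny CLIP vision encoder so the dummy pipeline stays fast and deterministic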
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE_ = CLIPVisionModel(_lowerCamelCase )
return model
@property
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_lowerCamelCase , do_normalize=_lowerCamelCase , do_resize=_lowerCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_24 , )
return image_processor
@property
def _A ( self: Dict ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE_ = PriorTransformer(**_lowerCamelCase )
return model
@property
def _A ( self: List[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_ = ShapERenderer(**_lowerCamelCase )
return model
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.dummy_prior
SCREAMING_SNAKE_CASE_ = self.dummy_image_encoder
SCREAMING_SNAKE_CASE_ = self.dummy_image_processor
SCREAMING_SNAKE_CASE_ = self.dummy_renderer
SCREAMING_SNAKE_CASE_ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=_lowerCamelCase , clip_sample=_lowerCamelCase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def _A ( self: Optional[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[Any]=0 ):
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self: str ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = torch_device == '''cpu'''
SCREAMING_SNAKE_CASE_ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCamelCase , relax_max_difference=_lowerCamelCase , )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE_ = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE_ = pipe(**_lowerCamelCase , num_images_per_prompt=_lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
def _A ( self: Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE_ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
_lowerCamelCase , generator=_lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 89 | 0 |
def __UpperCAmelCase ( __a : list[int] ,__a : list[int] ) -> None:
"""simple docstring"""
_a : List[Any] = len(__a )
print('''The following activities are selected:''' )
# The first activity is always selected
_a : List[Any] = 0
print(__a ,end=''',''' )
# Consider rest of the activities
for j in range(__a ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__a ,end=''',''' )
_a : Optional[int] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = [1, 3, 0, 5, 8, 5]
a__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( __a , unittest.TestCase ):
_A :Tuple = KandinskyVaaInpaintPipeline
_A :Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_A :Optional[int] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_A :Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_A :Union[str, Any] = False
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return 1_00
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**snake_case__ )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=snake_case__ , )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : List[str] , snake_case__ : Union[str, Any]=0 ):
lowercase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(snake_case__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(snake_case__ ).startswith("""mps""" ):
lowercase = torch.manual_seed(snake_case__ )
else:
lowercase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**snake_case__ )
lowercase = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def SCREAMING_SNAKE_CASE__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((7_68, 7_68) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowercase = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 428 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase__ : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> None:
'''simple docstring'''
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 451 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'table-transformer'
_snake_case = ['past_key_values']
_snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_="resnet50" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , **SCREAMING_SNAKE_CASE_ , )-> Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = backbone_config.get('''model_type''' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# set timm attributes to None
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None, None, None
__UpperCamelCase = use_timm_backbone
__UpperCamelCase = backbone_config
__UpperCamelCase = num_channels
__UpperCamelCase = num_queries
__UpperCamelCase = d_model
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = encoder_layers
__UpperCamelCase = auxiliary_loss
__UpperCamelCase = position_embedding_type
__UpperCamelCase = backbone
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = dilation
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = mask_loss_coefficient
__UpperCamelCase = dice_loss_coefficient
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = version.parse('1.11' )
@property
def A__ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self )-> float:
'''simple docstring'''
return 1E-5
@property
def A__ ( self )-> int:
'''simple docstring'''
return 12
| 451 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : Optional[Any] = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
A : Any = logging.get_logger(__name__)
class lowerCamelCase (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase__ = '''mask2former'''
lowerCamelCase__ = ['''swin''']
lowerCamelCase__ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : int , __magic_name__ : Dict = None , __magic_name__ : Union[str, Any] = 256 , __magic_name__ : int = 256 , __magic_name__ : Dict = 256 , __magic_name__ : Optional[int] = 1_024 , __magic_name__ : Union[str, Any] = "relu" , __magic_name__ : Dict = 6 , __magic_name__ : List[str] = 10 , __magic_name__ : Dict = 8 , __magic_name__ : List[Any] = 0.0 , __magic_name__ : Optional[int] = 2_048 , __magic_name__ : Tuple = False , __magic_name__ : Union[str, Any] = False , __magic_name__ : Any = 4 , __magic_name__ : Any = 255 , __magic_name__ : Union[str, Any] = 100 , __magic_name__ : Optional[Any] = 0.1 , __magic_name__ : List[str] = 2.0 , __magic_name__ : List[str] = 5.0 , __magic_name__ : Union[str, Any] = 5.0 , __magic_name__ : List[Any] = 12_544 , __magic_name__ : Tuple = 3.0 , __magic_name__ : Optional[Any] = 0.75 , __magic_name__ : str = 0.02 , __magic_name__ : Any = 1.0 , __magic_name__ : List[str] = True , __magic_name__ : List[Any] = [4, 8, 16, 32] , __magic_name__ : Dict = None , **__magic_name__ : Union[str, Any] , ) -> List[str]:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__magic_name__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop("model_type" )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = encoder_feedforward_dim
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = dim_feedforward
SCREAMING_SNAKE_CASE_ = pre_norm
SCREAMING_SNAKE_CASE_ = enforce_input_projection
SCREAMING_SNAKE_CASE_ = common_stride
SCREAMING_SNAKE_CASE_ = ignore_value
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = class_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = train_num_points
SCREAMING_SNAKE_CASE_ = oversample_ratio
SCREAMING_SNAKE_CASE_ = importance_sample_ratio
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = feature_strides
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = decoder_layers
super().__init__(**__magic_name__ )
@classmethod
def __A ( cls : Tuple , __magic_name__ : Optional[int] , **__magic_name__ : str ) -> Dict:
return cls(
backbone_config=__magic_name__ , **__magic_name__ , )
def __A ( self : int ) -> Dict[str, any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
| 140 |
import os
from math import logaa
def A_ ( A__ = "base_exp.txt" ) -> int:
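    # Appears to solve the "largest a^b" problem: each line of the data file holds "base,exponent"
    # and the answer is the 1-based line number maximising exponent * log10(base).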
a__ : float = 0
a__ : Optional[Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
a__ , a__ : List[str] = list(map(A__ , line.split(',' ) ) )
if x * logaa(A__ ) > largest:
a__ : Dict = x * logaa(A__ )
a__ : List[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
| 302 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 10_00 ):
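    # Finds the divisor (up to `digit`) whose fraction numerator/divisor has the longest recurring
    # decimal cycle, by tracking long-division remainders until one repeats.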
__snake_case : List[Any] = 1
__snake_case : Any = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__snake_case : list[int] = []
__snake_case : List[Any] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__snake_case : Optional[int] = len(__UpperCAmelCase )
__snake_case : List[str] = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__snake_case : Union[str, Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
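    # Keep a handle on the real Session.request so the simulated-timeout mock can still delegate to it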
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ : Tuple = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowerCamelCase__ : Union[str, Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowerCamelCase__ : Optional[int] = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def lowercase__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""),
}) , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 4 , ):
'''simple docstring'''
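        # Delegates to NLTK's corpus-level GLEU, counting n-grams of orders min_len..max_len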
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=SCREAMING_SNAKE_CASE_ , hypotheses=SCREAMING_SNAKE_CASE_ , min_len=SCREAMING_SNAKE_CASE_ , max_len=SCREAMING_SNAKE_CASE_)
}
| 12 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set."""
def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any:
'''simple docstring'''
lowercase__ : Any = Path(lowercase_ )
path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
lowercase__ : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
lowercase__ : Dict = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
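    # Detect the available accelerator (CUDA, XPU or NPU) and pre-fill the distributed-training fields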
if torch.cuda.is_available():
lowercase__ : Any = torch.cuda.device_count()
lowercase__ : Any = num_gpus
lowercase__ : Optional[int] = False
if num_gpus > 1:
lowercase__ : Tuple = """MULTI_GPU"""
else:
lowercase__ : Optional[Any] = """NO"""
elif is_xpu_available() and use_xpu:
lowercase__ : Union[str, Any] = torch.xpu.device_count()
lowercase__ : str = num_xpus
lowercase__ : List[Any] = False
if num_xpus > 1:
lowercase__ : str = """MULTI_XPU"""
else:
lowercase__ : Optional[Any] = """NO"""
elif is_npu_available():
lowercase__ : Tuple = torch.npu.device_count()
lowercase__ : Union[str, Any] = num_npus
lowercase__ : Union[str, Any] = False
if num_npus > 1:
lowercase__ : List[Any] = """MULTI_NPU"""
else:
lowercase__ : int = """NO"""
else:
lowercase__ : Union[str, Any] = 0
lowercase__ : str = True
lowercase__ : Union[str, Any] = 1
lowercase__ : int = """NO"""
lowercase__ : Tuple = ClusterConfig(**lowercase_ )
config.to_json_file(lowercase_ )
return path
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ )
parser.add_argument(
"""--config_file""" , default=lowercase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=lowercase_ )
return parser
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
| 12 | 1 |
import datasets
from .evaluate import evaluate
_A = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_A = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_A = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
 | 715 |
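# Minimal sketch of calling the metric defined above; it mirrors the docstring example:
#
#     squad_metric = datasets.load_metric("squad")
#     predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#     references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#     print(squad_metric.compute(predictions=predictions, references=references))
#     # {'exact_match': 100.0, 'f1': 100.0}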
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid), self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
        print()
 | 228 | 0 |
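# Minimal usage sketch for the segment tree above with the sum combiner (names as reconstructed there):
#
#     import operator
#     tree = SegmentTree([1, 2, 3, 4, 5], operator.add)
#     print(tree.query_range(0, 4))  # 15
#     tree.update(2, 10)
#     print(tree.query_range(0, 4))  # 22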
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0
UpperCAmelCase__ = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition( number_to_partition: int ) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution( number_unique_partitions: int = 5000 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
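    # Worked example (sketch): each element of partition(n) is a product of primes that encodes one
    # way of writing n as a sum of primes, so len(partition(n)) counts those ways.
    print(partition(7))       # {10, 12, 7} -> the three prime partitions 7, 5 + 2, 3 + 2 + 2
    print(len(partition(7)))  # 3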
| 186 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase__ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
UpperCAmelCase__ = {
'''AI-Sweden/gpt-sw3-126m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-350m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-1.6b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-6.7b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-20b''': 2_0_4_8,
}
class a__ ( PreTrainedTokenizer ):
'''simple docstring'''
A : int = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            name_or_path = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '<|endoftext|>' if eos_token is None else eos_token
        unk_token = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '<pad>' if pad_token is None else pad_token
            bos_token = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# Used for whitespace normalization in input texts
# fmt : off
__A= {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
def __getstate__( self : Optional[int] ) -> Tuple:
__A= self.__dict__.copy()
__A= None
return state
def __setstate__( self : int , lowerCAmelCase_ : int ) -> Tuple:
__A= d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A= {}
__A= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase ( self : Tuple ) -> int:
return len(self.sp_model )
def lowerCAmelCase ( self : int , lowerCAmelCase_ : str ) -> str:
__A= self.non_printing_characters_re.sub('' , lowerCAmelCase_ )
# Normalize whitespaces
__A= ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
__A= unicodedata.normalize('NFC' , lowerCAmelCase_ )
return text
def lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[Any] ) -> List[str]:
__A= self.preprocess_text(lowerCAmelCase_ )
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def lowerCAmelCase ( self : Any , lowerCAmelCase_ : str ) -> int:
return self.sp_model.PieceToId(lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
return self.sp_model.IdToPiece(lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ : str ) -> str:
return out_string
def lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> str:
__A= []
__A= ''
__A= False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
__A= True
__A= []
else:
current_sub_tokens.append(lowerCAmelCase_ )
__A= False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string
def lowerCAmelCase ( self : List[Any] ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A= os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , 'wb' ) as fi:
__A= self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__A= self.preprocess_text(lowerCAmelCase_ )
__A= self.sp_model.encode(lowerCAmelCase_ )
else:
__A= [self.preprocess_text(lowerCAmelCase_ ) for t in text]
__A= self.sp_model.encode(lowerCAmelCase_ )
if return_tensors is True or return_tensors == "pt":
__A= torch.tensor(lowerCAmelCase_ )
return token_ids
def lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Union[int, List[int]] ) -> str:
return self.sp_model.decode(lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : "Conversation" ) -> List[int]:
__A= [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__A= (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(lowerCAmelCase_ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=lowerCAmelCase_ )
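# Hypothetical usage sketch (checkpoint id taken from the map above; requires `sentencepiece` and
# network access, so treat this as illustrative rather than guaranteed to run):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    encoded = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
    print(tokenizer.decode(encoded[0]))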
| 186 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A_ :Optional[Any] = TypeVar('''T''')
A_ :Optional[Any] = TypeVar('''U''')
class __A ( Generic[T, U] ):
"""simple docstring"""
    def __init__( self , key , val ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =key
__UpperCamelCase : Union[str, Any] =val
__UpperCamelCase : Dict =None
__UpperCamelCase : Union[str, Any] =None
def __repr__( self ):
"""simple docstring"""
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __A ( Generic[T, U] ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
        self.head = DoubleLinkedListNode(None , None )
        self.rear = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self ):
"""simple docstring"""
__UpperCamelCase : str =['DoubleLinkedList']
__UpperCamelCase : str =self.head
while node.next is not None:
rep.append(str(_lowerCamelCase ) )
__UpperCamelCase : List[str] =node.next
rep.append(str(self.rear ) )
return ",\n ".join(_lowerCamelCase )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__UpperCamelCase : Tuple =node
__UpperCamelCase : int =previous
__UpperCamelCase : Union[str, Any] =node
__UpperCamelCase : Any =self.rear
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
__UpperCamelCase : Union[str, Any] =node.next
__UpperCamelCase : List[Any] =node.prev
__UpperCamelCase : str =None
__UpperCamelCase : Optional[int] =None
return node
class __A ( Generic[T, U] ):
"""simple docstring"""
UpperCamelCase__ : dict[Callable[[T], U], LRUCache[T, U]] ={}
def __init__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Dict =DoubleLinkedList()
__UpperCamelCase : List[str] =capacity
__UpperCamelCase : Union[str, Any] =0
__UpperCamelCase : Dict =0
__UpperCamelCase : Optional[Any] =0
__UpperCamelCase : List[Any] ={}
def __repr__( self ):
"""simple docstring"""
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , lowerCamelCase__ ):
"""simple docstring"""
return key in self.cache
    def get( self , key ):
"""simple docstring"""
if key in self.cache:
self.hits += 1
__UpperCamelCase : List[str] =self.cache[key]
__UpperCamelCase : Dict =self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_lowerCamelCase )
return node.val
self.miss += 1
return None
    def put( self , key , value ):
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__UpperCamelCase : List[str] =self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_lowerCamelCase ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__UpperCamelCase : Union[str, Any] =self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__UpperCamelCase : Optional[int] =value
self.list.add(_lowerCamelCase )
@classmethod
def __lowercase ( cls , lowerCamelCase__ = 128 ):
"""simple docstring"""
def cache_decorator_inner(lowerCamelCase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
__UpperCamelCase : Tuple =LRUCache(_lowerCamelCase )
__UpperCamelCase : Union[str, Any] =cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__UpperCamelCase : Optional[Any] =func(*_lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , _lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_lowerCamelCase , 'cache_info' , _lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
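# Sketch of the intended decorator usage. Upstream this class is exposed as `LRUCache` with a
# classmethod named `decorator` (both appear above under obfuscated names), so the following is
# illustrative only:
#
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#     print(fib(20))           # 6765
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)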
| 701 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( PipelineTool ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] ="""dandelin/vilt-b32-finetuned-vqa"""
UpperCamelCase__ : str =(
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
UpperCamelCase__ : Any ="""image_qa"""
UpperCamelCase__ : int =AutoProcessor
UpperCamelCase__ : Optional[Any] =AutoModelForVisualQuestionAnswering
UpperCamelCase__ : Dict =["""image""", """text"""]
UpperCamelCase__ : List[Any] =["""text"""]
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return self.pre_processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors='pt' )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
with torch.no_grad():
return self.model(**lowerCamelCase__ ).logits
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
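# Hypothetical usage sketch: upstream this tool is `ImageQuestionAnsweringTool` (the class name is
# obfuscated above), and it needs the vision extras plus a local image file, so this is illustrative only:
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     print(tool(image=Image.open("photo.jpg"), question="What animal is in the picture?"))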
| 154 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
__UpperCAmelCase = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query( query : str = "dhaka" , max_images : int = 5 ) -> int:
    '''simple docstring'''
    max_images = min(max_images , 5_0 ) # Prevent abuse!
a__ : Union[str, Any] = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
a__ : Optional[int] = requests.get("https://www.google.com/search" , params=lowerCAmelCase__ , headers=lowerCAmelCase__ )
a__ : Any = BeautifulSoup(html.text , "html.parser" )
a__ : List[Any] = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
a__ : Any = json.dumps(lowerCAmelCase__ )
a__ : int = json.loads(lowerCAmelCase__ )
a__ : Tuple = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , lowerCAmelCase__ , )
if not matched_google_image_data:
return 0
a__ : Tuple = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(lowerCAmelCase__ ) , )
a__ : List[Any] = re.findall(
R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , lowerCAmelCase__ , )
for index, fixed_full_res_image in enumerate(lowerCAmelCase__ ):
if index >= max_images:
return index
a__ : Optional[int] = bytes(lowerCAmelCase__ , "ascii" ).decode(
"unicode-escape" )
a__ : Any = bytes(lowerCAmelCase__ , "ascii" ).decode(
"unicode-escape" )
a__ : Optional[int] = urllib.request.build_opener()
a__ : int = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(lowerCAmelCase__ )
a__ : Dict = F"query_{query.replace(' ' , '_' )}"
if not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
urllib.request.urlretrieve( # noqa: S310
lowerCAmelCase__ , F"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
__UpperCAmelCase = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print('''Please provide a search term.''')
        raise
 | 642 |
"""simple docstring"""
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("\"" , "" ).split("," )

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 6_4  # "A" -> 1, ..., "Z" -> 26 (e.g. "COLIN" scores 53)
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
    print(solution())
 | 642 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""")
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""")

    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""")
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command( args ):
A_ : Optional[int] = torch.__version__
A_ : Optional[int] = torch.cuda.is_available()
A_ : List[str] = is_xpu_available()
A_ : Union[str, Any] = is_npu_available()
A_ : Optional[Any] = """Not found"""
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
A_ : Dict = load_config_from_file(args.config_file).to_dict()
A_ : Tuple = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""PyTorch XPU available""": str(lowerCamelCase),
"""PyTorch NPU available""": str(lowerCamelCase),
"""System RAM""": F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
}
if pt_cuda_available:
A_ : Tuple = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""")
print("""\n""".join([F'- {prop}: {val}' for prop, val in info.items()]))
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""")
A_ : List[Any] = (
"""\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
if isinstance(lowerCamelCase , lowerCamelCase)
else F'\t{accelerate_config}'
)
print(lowerCamelCase)
A_ : Optional[Any] = accelerate_config
return info
def main( ):
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def get_mobilevit_config( mobilevit_name ):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( name : Optional[Any] , base_model : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def convert_state_dict( orig_state_dict , model , base_model=False ):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def prepare_img( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 1 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
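# A small worked instance (sketch) of the function above: the classic 60/100/120 items with
# capacity 50 give a maximum fractional value of 240.
if __name__ == "__main__":
    demo_value, demo_weight, demo_capacity = [60, 100, 120], [10, 20, 30], 50
    demo_max, demo_fractions = fractional_knapsack(demo_value, demo_weight, demo_capacity)
    print(demo_max)        # 240.0
    print(demo_fractions)  # [1, 1, 0.6666666666666666]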
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 544 |
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
A = int(input('Enter the base: ').strip())
A = int(input('Enter the exponent: ').strip())
A = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
 | 544 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : BigBirdConfig
lowercase : jnp.dtype = jnp.floataa
lowercase : bool = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]:
super().setup()
A : List[Any] =nn.Dense(5 , dtype=self.dtype )
def __call__( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
A : Dict =super().__call__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : List[str] =self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule
def A__ ( lowercase: str, lowercase: Optional[int], lowercase: Tuple, lowercase: Dict, lowercase: Optional[Any], lowercase: Dict ) -> int:
def cross_entropy(lowercase: Optional[int], lowercase: Dict, lowercase: List[str]=None ):
A : Any =logits.shape[-1]
A : List[Any] =(labels[..., None] == jnp.arange(lowercase )[None]).astype('f4' )
A : Union[str, Any] =jax.nn.log_softmax(lowercase, axis=-1 )
A : Union[str, Any] =-jnp.sum(labels * logits, axis=-1 )
if reduction is not None:
A : List[Any] =reduction(lowercase )
return loss
A : Union[str, Any] =partial(lowercase, reduction=jnp.mean )
A : Any =cross_entropy(lowercase, lowercase )
A : List[str] =cross_entropy(lowercase, lowercase )
A : Union[str, Any] =cross_entropy(lowercase, lowercase )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : str = "google/bigbird-roberta-base"
lowercase : int = 3000
lowercase : int = 10500
lowercase : int = 128
lowercase : int = 3
lowercase : int = 1
lowercase : int = 5
# tx_args
lowercase : float = 3e-5
lowercase : float = 0.0
lowercase : int = 20000
lowercase : float = 0.0_0_9_5
lowercase : str = "bigbird-roberta-natural-questions"
lowercase : str = "training-expt"
lowercase : str = "data/nq-training.jsonl"
lowercase : str = "data/nq-validation.jsonl"
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Tuple =os.path.join(self.base_dir , self.save_dir )
A : Optional[Any] =self.batch_size_per_device * jax.device_count()
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : int
lowercase : int = 4096 # no dynamic padding on TPUs
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
A : Dict =self.collate_fn(SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =jax.tree_util.tree_map(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return batch
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
A , A : Optional[int] =self.fetch_inputs(features['input_ids'] )
A : Union[str, Any] ={
'input_ids': jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.intaa ),
'attention_mask': jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa ),
}
return batch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : list ) -> Union[str, Any]:
A : Tuple =[self._fetch_inputs(SCREAMING_SNAKE_CASE__ ) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : list ) -> Any:
A : Any =[1 for _ in range(len(SCREAMING_SNAKE_CASE__ ) )]
while len(SCREAMING_SNAKE_CASE__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def A__ ( lowercase: Optional[Any], lowercase: List[Any], lowercase: Optional[Any]=None ) -> Union[str, Any]:
if seed is not None:
A : Dict =dataset.shuffle(seed=lowercase )
for i in range(len(lowercase ) // batch_size ):
A : Optional[int] =dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowercase )
@partial(jax.pmap, axis_name='batch' )
def A__ ( lowercase: str, lowercase: str, **lowercase: Any ) -> Optional[int]:
def loss_fn(lowercase: Dict ):
A : Union[str, Any] =model_inputs.pop('start_labels' )
A : Optional[Any] =model_inputs.pop('end_labels' )
A : Any =model_inputs.pop('pooled_labels' )
A : List[Any] =state.apply_fn(**lowercase, params=lowercase, dropout_rng=lowercase, train=lowercase )
A , A , A : List[str] =outputs
return state.loss_fn(
lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, )
A , A : Union[str, Any] =jax.random.split(lowercase )
A : Dict =jax.value_and_grad(lowercase )
A , A : str =grad_fn(state.params )
A : Any =jax.lax.pmean({'loss': loss}, axis_name='batch' )
A : Dict =jax.lax.pmean(lowercase, 'batch' )
A : List[str] =state.apply_gradients(grads=lowercase )
return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name='batch' )
def A__ ( lowercase: str, **lowercase: Tuple ) -> List[str]:
A : List[str] =model_inputs.pop('start_labels' )
A : Union[str, Any] =model_inputs.pop('end_labels' )
A : Union[str, Any] =model_inputs.pop('pooled_labels' )
A : List[Any] =state.apply_fn(**lowercase, params=state.params, train=lowercase )
A , A , A : List[Any] =outputs
A : Tuple =state.loss_fn(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
A : int =jax.lax.pmean({'loss': loss}, axis_name='batch' )
return metrics
class SCREAMING_SNAKE_CASE_ ( train_state.TrainState ):
'''simple docstring'''
lowercase : Callable = struct.field(pytree_node=lowerCAmelCase_ )
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : Args
lowercase : Callable
lowercase : Callable
lowercase : Callable
lowercase : Callable
lowercase : wandb
lowercase : Callable = None
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=None ) -> List[Any]:
A : List[Any] =model.params
A : int =TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , loss_fn=SCREAMING_SNAKE_CASE__ , )
if ckpt_dir is not None:
A , A , A , A , A : Dict =restore_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : str ={
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
A , A : Optional[int] =build_tx(**SCREAMING_SNAKE_CASE__ )
A : List[Any] =train_state.TrainState(
step=SCREAMING_SNAKE_CASE__ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , opt_state=SCREAMING_SNAKE_CASE__ , )
A : int =args
A : List[str] =data_collator
A : List[str] =lr
A : Any =params
A : Any =jax_utils.replicate(SCREAMING_SNAKE_CASE__ )
return state
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : Union[str, Any] =self.args
A : Optional[int] =len(SCREAMING_SNAKE_CASE__ ) // args.batch_size
A : Optional[int] =jax.random.PRNGKey(0 )
A : str =jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
for epoch in range(args.max_epochs ):
A : Optional[Any] =jnp.array(0 , dtype=jnp.floataa )
A : Union[str, Any] =get_batched_dataset(SCREAMING_SNAKE_CASE__ , args.batch_size , seed=SCREAMING_SNAKE_CASE__ )
A : Any =0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc=f'Running EPOCH-{epoch}' ):
A : Dict =self.data_collator(SCREAMING_SNAKE_CASE__ )
A , A , A : Union[str, Any] =self.train_step_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
A : Union[str, Any] =jax_utils.unreplicate(state.step )
A : int =running_loss.item() / i
A : List[Any] =self.scheduler_fn(state_step - 1 )
A : str =self.evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Tuple ={
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE__ ) )
self.logger.log(SCREAMING_SNAKE_CASE__ , commit=SCREAMING_SNAKE_CASE__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
A : List[Any] =get_batched_dataset(SCREAMING_SNAKE_CASE__ , self.args.batch_size )
A : str =len(SCREAMING_SNAKE_CASE__ ) // self.args.batch_size
A : List[str] =jnp.array(0 , dtype=jnp.floataa )
A : Optional[int] =0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc='Evaluating ... ' ):
A : Dict =self.data_collator(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.val_step_fn(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
A : List[Any] =jax_utils.unreplicate(SCREAMING_SNAKE_CASE__ )
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ' )
self.model_save_fn(SCREAMING_SNAKE_CASE__ , params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE__ , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE__ , 'data_collator.joblib' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , SCREAMING_SNAKE_CASE__ )
print('DONE' )
def restore_checkpoint( save_dir, state ):
print(F'RESTORING CHECKPOINT FROM {save_dir}', end=' ... ' )
with open(os.path.join(lowercase, 'flax_model.msgpack' ), 'rb' ) as f:
A : Tuple =from_bytes(state.params, f.read() )
with open(os.path.join(lowercase, 'opt_state.msgpack' ), 'rb' ) as f:
A : List[str] =from_bytes(state.opt_state, f.read() )
A : Any =joblib.load(os.path.join(lowercase, 'args.joblib' ) )
A : Any =joblib.load(os.path.join(lowercase, 'data_collator.joblib' ) )
with open(os.path.join(lowercase, 'training_state.json' ), 'r' ) as f:
A : List[str] =json.load(lowercase )
A : int =training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
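# Rough sanity check of the optimizer/schedule built above (argument values are illustrative only):
if __name__ == "__main__":
    tx, sched = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1_000, weight_decay=0.0095)
    print(sched(0), sched(100), sched(1_000))  # linear warmup from 0.0 to 3e-5, then linear decay towards 1e-7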
 | 661 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys( d ) -> dict:
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = d[k] # restore
    return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path, pytorch_dump_folder_path ):
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
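# Loading the converted checkpoint afterwards might look like this (a sketch,
# not part of the original script; the folder path is a placeholder):
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#   model = BioGptForCausalLM.from_pretrained("path/to/pytorch_dump_folder")
#   tokenizer = BioGptTokenizer.from_pretrained("path/to/pytorch_dump_folder")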
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 661 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class snake_case__ ( UpperCamelCase):
a_ = "gpt_bigcode"
a_ = ["past_key_values"]
a_ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , _A : str=5_02_57 , _A : int=10_24 , _A : Optional[int]=7_68 , _A : Dict=12 , _A : Optional[int]=12 , _A : List[str]=None , _A : str="gelu_pytorch_tanh" , _A : Optional[Any]=0.1 , _A : Tuple=0.1 , _A : Dict=0.1 , _A : List[str]=1e-5 , _A : List[str]=0.02 , _A : Optional[int]=True , _A : List[str]=True , _A : Optional[Any]=5_02_56 , _A : Optional[int]=5_02_56 , _A : Optional[Any]=True , _A : Union[str, Any]=True , _A : int=True , **_A : Union[str, Any] , ) -> List[str]:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : List[Any] = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Dict = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Union[str, Any] = embd_pdrop
UpperCAmelCase_ : Dict = attn_pdrop
UpperCAmelCase_ : Union[str, Any] = layer_norm_epsilon
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : Optional[Any] = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Dict = multi_query
UpperCAmelCase_ : Optional[int] = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
| 541 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__ ( unittest.TestCase):
def __init__( self : Optional[Any] , _A : int , _A : List[Any]=7 , _A : Tuple=3 , _A : int=18 , _A : Union[str, Any]=30 , _A : Any=4_00 , _A : List[Any]=True , _A : Optional[int]=None , _A : Optional[Any]=True , _A : Union[str, Any]=None , ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = size if size is not None else {'''shortest_edge''': 20}
UpperCAmelCase_ : Dict = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : List[str] = max_resolution
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : Any = size
UpperCAmelCase_ : Union[str, Any] = do_center_crop
UpperCAmelCase_ : Any = crop_size
def A ( self : List[Any] ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = MobileNetVaImageProcessor if is_vision_available() else None
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : Dict = MobileNetVaImageProcessingTester(self )
@property
def A ( self : Optional[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ) -> Dict:
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''crop_size''' ) )
def A ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def A ( self : int ) -> List[str]:
pass
def A ( self : List[Any] ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : str ) -> List[str]:
# Initialize image_processing
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 541 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__a: Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> None:
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 428 | '''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = BloomTokenizerFast
SCREAMING_SNAKE_CASE = BloomTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = "tokenizer_file"
SCREAMING_SNAKE_CASE = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def _lowerCAmelCase( self ) -> Dict:
super().setUp()
lowercase__ : List[Any] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : List[str] = self.get_rust_tokenizer()
lowercase__ : Union[str, Any] = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase__ : Dict = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase__ : List[Any] = tokenizer.batch_encode_plus(__lowerCAmelCase )['''input_ids''']
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : int = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase=6 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase__ : str = '''This is a simple input'''
lowercase__ : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase__ : Dict = ('''This is a simple input''', '''This is a pair''')
lowercase__ : List[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(__lowerCAmelCase , max_length=__lowerCAmelCase )
tokenizer_r.encode_plus(__lowerCAmelCase , max_length=__lowerCAmelCase )
tokenizer_r.batch_encode_plus(__lowerCAmelCase , max_length=__lowerCAmelCase )
tokenizer_r.encode(__lowerCAmelCase , max_length=__lowerCAmelCase )
tokenizer_r.batch_encode_plus(__lowerCAmelCase , max_length=__lowerCAmelCase )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase__ : List[str] = None # Hotfixing padding = None
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Dict = self.get_rust_tokenizer()
lowercase__ : Any = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=__lowerCAmelCase )
        lowercase__ : Optional[Any] = next(iter(__lowerCAmelCase ) )['''premise'''] # pick one data sample
lowercase__ : List[str] = list(sample_data.values() )
lowercase__ : str = list(map(tokenizer.encode , __lowerCAmelCase ) )
lowercase__ : List[str] = [tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) for x in output_tokens]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 428 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( UpperCamelCase : str , UpperCamelCase : str ):
"""simple docstring"""
A__ : Any =get_failure_array(UpperCamelCase )
# 2) Step through text searching for pattern
A__ , A__ : List[str] =0, 0 # index into text, pattern
while i < len(UpperCamelCase ):
if pattern[j] == text[i]:
if j == (len(UpperCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
A__ : List[str] =failure[j - 1]
continue
i += 1
return False
def lowercase ( UpperCamelCase : str ):
"""simple docstring"""
A__ : Union[str, Any] =[0]
A__ : Any =0
A__ : Any =1
while j < len(UpperCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
A__ : str =failure[i - 1]
continue
j += 1
failure.append(UpperCamelCase )
return failure
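# A worked example (illustrative, mirroring Test 5 below): for the pattern
# "aabaabaaa" the failure array is [0, 1, 0, 1, 2, 3, 4, 5, 2]. The final 2
# records that the longest proper prefix that is also a suffix of the whole
# pattern is "aa", so after a mismatch the search can resume at index 2
# instead of restarting from the beginning of the pattern.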
if __name__ == "__main__":
# Test 1)
__A : Dict = "abc1abc12"
__A : Dict = "alskfjaldsabc1abc1abc12k23adsfabcabc"
__A : List[str] = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__A : Dict = "ABABX"
__A : Any = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
__A : str = "AAAB"
__A : Dict = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
__A : Optional[Any] = "abcdabcy"
__A : int = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
__A : Union[str, Any] = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 656 | """simple docstring"""
__A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)}
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) )
def lowercase ( ):
"""simple docstring"""
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
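# Sanity check (illustrative, not part of the original file): 4150 is one of
# the numbers counted above, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.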
| 656 | 1 |
import math
def lowerCAmelCase ( _lowerCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def lowerCAmelCase ( _lowerCAmelCase : float = 1 / 1_2345 ):
"""simple docstring"""
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = 3
while True:
UpperCAmelCase__ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
UpperCAmelCase__ = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
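# Illustrative values (not from the original file): check_partition_perfect(2)
# is True because math.log2(math.sqrt(4 * 2 + 1) / 2 + 1 / 2) == math.log2(2.0) == 1.0,
# an integer, whereas check_partition_perfect(3) is False because
# math.log2(math.sqrt(13) / 2 + 1 / 2) is roughly 1.20.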
| 704 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def lowerCAmelCase ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float , _lowerCAmelCase : int = 1_6000 ):
"""simple docstring"""
UpperCAmelCase__ = int(round(sample_rate * max_length ) )
if len(_lowerCAmelCase ) <= sample_length:
return wav
UpperCAmelCase__ = randint(0 , len(_lowerCAmelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
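# Illustrative numbers (an assumption of this note, not from the original code):
# with sample_rate=16_000 and max_length=20.0 seconds, sample_length is
# int(round(16_000 * 20.0)) = 320_000 samples, so longer clips are cropped to a
# random 320_000-sample window and shorter ones are returned unchanged.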
@dataclass
class _UpperCamelCase :
UpperCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
UpperCAmelCase_ = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
UpperCAmelCase_ = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
UpperCAmelCase_ = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
UpperCAmelCase_ = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class _UpperCamelCase :
UpperCAmelCase_ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
UpperCAmelCase_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def UpperCAmelCase_ ( self :str ) -> List[Any]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , lowerCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def lowerCAmelCase ( ):
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _lowerCAmelCase , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase__ = DatasetDict()
UpperCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--label_column_name` to the correct text column - one of "
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_lowerCAmelCase : Tuple ):
UpperCAmelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_lowerCAmelCase )
UpperCAmelCase__ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase__ = {model_input_name: inputs.get(_lowerCAmelCase )}
UpperCAmelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCAmelCase : Union[str, Any] ):
UpperCAmelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase__ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase__ = {model_input_name: inputs.get(_lowerCAmelCase )}
UpperCAmelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCAmelCase__ , UpperCAmelCase__ = {}, {}
for i, label in enumerate(_lowerCAmelCase ):
UpperCAmelCase__ = str(_lowerCAmelCase )
UpperCAmelCase__ = label
# Load the accuracy metric from the datasets package
UpperCAmelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase : Optional[Any] ):
UpperCAmelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_lowerCAmelCase , references=eval_pred.label_ids )
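    # compute_metrics returns a dict such as {"accuracy": 0.87} (the value here is
    # illustrative); the Trainer reports it with an "eval_" prefix during evaluation.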
UpperCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
# Initialize our trainer
UpperCAmelCase__ = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__ = last_checkpoint
UpperCAmelCase__ = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _lowerCAmelCase )
trainer.save_metrics("eval" , _lowerCAmelCase )
# Write model card and (optionally) push to hub
UpperCAmelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
if __name__ == "__main__":
main()
| 364 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class lowerCamelCase_ (snake_case__ , snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = "convnextv2"
def __init__( self : Tuple , A : List[str]=3 , A : Any=4 , A : Tuple=4 , A : List[str]=None , A : Union[str, Any]=None , A : Optional[Any]="gelu" , A : List[Any]=0.02 , A : Dict=1E-12 , A : List[str]=0.0 , A : Dict=224 , A : Any=None , A : List[str]=None , **A : str , ):
super().__init__(**A )
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : Tuple = patch_size
_UpperCAmelCase : Optional[int] = num_stages
_UpperCAmelCase : Dict = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_UpperCAmelCase : Tuple = [3, 3, 9, 3] if depths is None else depths
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : str = layer_norm_eps
_UpperCAmelCase : Optional[Any] = drop_path_rate
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Dict = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : str = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
| 244 | '''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , A : List[Any] , ):
_UpperCAmelCase : Union[str, Any] = parent
_UpperCAmelCase : Tuple = 13
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : List[Any] = 30
_UpperCAmelCase : Any = self.seq_length + self.mem_len
_UpperCAmelCase : str = 15
_UpperCAmelCase : Dict = True
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = 99
_UpperCAmelCase : int = [10, 50, 80]
_UpperCAmelCase : List[str] = 32
_UpperCAmelCase : List[str] = 32
_UpperCAmelCase : Any = 4
_UpperCAmelCase : List[Any] = 8
_UpperCAmelCase : Any = 128
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : int = 2
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Any = 3
_UpperCAmelCase : List[str] = self.vocab_size - 1
_UpperCAmelCase : Any = 0.01
def _A ( self : Any ):
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _A ( self : Optional[int] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _A ( self : int , A : Union[str, Any] , A : str , A : List[str] , A : Optional[int] ):
_UpperCAmelCase : int = TFTransfoXLModel(A )
_UpperCAmelCase , _UpperCAmelCase : str = model(A ).to_tuple()
_UpperCAmelCase : Any = {"input_ids": input_ids_a, "mems": mems_a}
_UpperCAmelCase , _UpperCAmelCase : Dict = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _A ( self : Optional[int] , A : Dict , A : Union[str, Any] , A : Any , A : Tuple ):
_UpperCAmelCase : Tuple = TFTransfoXLLMHeadModel(A )
_UpperCAmelCase , _UpperCAmelCase : Dict = model(A ).to_tuple()
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids_a, "labels": lm_labels}
_UpperCAmelCase , _UpperCAmelCase : List[Any] = model(A ).to_tuple()
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = model([input_ids_a, mems_a] ).to_tuple()
_UpperCAmelCase : Tuple = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_UpperCAmelCase , _UpperCAmelCase : Tuple = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _A ( self : Optional[Any] , A : Optional[Any] , A : Tuple , A : List[str] , A : Union[str, Any] ):
_UpperCAmelCase : Dict = TFTransfoXLForSequenceClassification(A )
_UpperCAmelCase : List[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : int ):
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = config_and_inputs
_UpperCAmelCase : Any = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCamelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__UpperCamelCase: Tuple = () if is_tf_available() else ()
__UpperCamelCase: Union[str, Any] = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__UpperCamelCase: Any = False
__UpperCamelCase: Optional[Any] = False
__UpperCamelCase: List[str] = False
__UpperCamelCase: List[Any] = False
def _A ( self : Tuple , A : Dict , A : int , A : str , A : Any , A : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _A ( self : List[str] ):
_UpperCAmelCase : int = TFTransfoXLModelTester(self )
_UpperCAmelCase : Any = ConfigTester(self , config_class=A , d_embed=37 )
def _A ( self : int ):
self.config_tester.run_common_tests()
def _A ( self : List[Any] ):
self.model_tester.set_seed()
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def _A ( self : str ):
self.model_tester.set_seed()
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[str] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_UpperCAmelCase : List[Any] = model.get_output_embeddings()
assert isinstance(A , tf.keras.layers.Layer )
_UpperCAmelCase : List[Any] = model.get_bias()
assert name is None
else:
_UpperCAmelCase : Optional[int] = model.get_output_embeddings()
assert x is None
_UpperCAmelCase : Any = model.get_bias()
assert name is None
def _A ( self : Dict ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _A ( self : Any ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def _A ( self : Any ):
pass
@require_tf
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : str = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
_UpperCAmelCase : int = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCAmelCase : List[str] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCAmelCase : List[Any] = model.generate(A , max_length=200 , do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() , A )
| 244 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a__ = logging.get_logger('''transformers.models.speecht5''')
def snake_case__ ( a , a , a ) -> Optional[Any]:
'''simple docstring'''
hf_model.apply_weight_norm()
snake_case__ = checkpoint["""input_conv.weight_g"""]
snake_case__ = checkpoint["""input_conv.weight_v"""]
snake_case__ = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
snake_case__ = checkpoint[F"""upsamples.{i}.1.weight_g"""]
snake_case__ = checkpoint[F"""upsamples.{i}.1.weight_v"""]
snake_case__ = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case__ = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
snake_case__ = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
snake_case__ = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
snake_case__ = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
snake_case__ = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
snake_case__ = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
snake_case__ = checkpoint["""output_conv.1.weight_g"""]
snake_case__ = checkpoint["""output_conv.1.weight_v"""]
snake_case__ = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def snake_case__ ( a , a , a , a=None , a=None , ) -> List[str]:
'''simple docstring'''
if config_path is not None:
snake_case__ = SpeechTaHifiGanConfig.from_pretrained(a )
else:
snake_case__ = SpeechTaHifiGanConfig()
snake_case__ = SpeechTaHifiGan(a )
snake_case__ = torch.load(a )
load_weights(orig_checkpoint["""model"""]["""generator"""] , a , a )
snake_case__ = np.load(a )
snake_case__ = stats[0].reshape(-1 )
snake_case__ = stats[1].reshape(-1 )
snake_case__ = torch.from_numpy(a ).float()
snake_case__ = torch.from_numpy(a ).float()
model.save_pretrained(a )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(a )
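# After conversion, the vocoder could be reloaded for inference roughly like this
# (a sketch; the path is a placeholder and the spectrogram shape is an assumption):
#   from transformers import SpeechTaHifiGan
#   vocoder = SpeechTaHifiGan.from_pretrained("path/to/pytorch_dump_folder")
#   waveform = vocoder(spectrogram)  # spectrogram: (sequence_length, num_mel_bins)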
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 717 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def snake_case__ ( a ) -> Optional[int]:
'''simple docstring'''
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
snake_case__ = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=a )
snake_case__ = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(a )
EnvironmentCommand.register_subcommand(a )
TestCommand.register_subcommand(a )
RunBeamCommand.register_subcommand(a )
DummyDataCommand.register_subcommand(a )
# Parse args
snake_case__ , snake_case__ = parser.parse_known_args()
if not hasattr(a , """func""" ):
parser.print_help()
exit(1 )
snake_case__ = parse_unknown_args(a )
# Run
snake_case__ = args.func(a , **a )
service.run()
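# Typical invocation (illustrative): `datasets-cli env` dispatches to
# EnvironmentCommand and prints environment info; any extra `--key value` pairs
# are collected into kwargs by parse_unknown_args above.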
if __name__ == "__main__":
main() | 566 | 0 |
def _lowercase ( __UpperCamelCase : int , __UpperCamelCase : int ):
return int((input_a, input_a).count(1 ) != 0 )
def _lowercase ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 214 |
lowerCAmelCase : Dict = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _lowercase ( __UpperCamelCase : dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
snake_case__ = set()
# keep track of all the paths to be checked
snake_case__ = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
snake_case__ = queue.pop(0 )
# get the last node from the path
snake_case__ = path[-1]
if node not in explored:
snake_case__ = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
snake_case__ = list(__UpperCamelCase )
new_path.append(__UpperCamelCase )
queue.append(__UpperCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__UpperCamelCase )
# in case there's no path between the 2 nodes
return []
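# Small walkthrough (illustrative): searching demo_graph from "G" to "D" first
# expands ["G"], then ["G", "C"], then the paths through "A", "F" and back to "G";
# the first path that reaches the goal is ["G", "C", "A", "B", "D"], which is what
# the assertion at the bottom of this file checks.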
def _lowercase ( __UpperCamelCase : dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
snake_case__ = [start]
snake_case__ = set(__UpperCamelCase )
    # Keep tabs on distances from the `start` node.
snake_case__ = {start: 0, target: -1}
while queue:
snake_case__ = queue.pop(0 )
if node == target:
snake_case__ = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__UpperCamelCase )
queue.append(__UpperCamelCase )
snake_case__ = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 214 | 1 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__lowerCamelCase : int = logging.getLogger(__name__)
__lowerCamelCase : Union[str, Any] = 'Hello world! cécé herlolip'
__lowerCamelCase : Union[str, Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : List[str] = BertAbsConfig(
temp_dir="." ,finetune_bert=_lowerCamelCase ,large=_lowerCamelCase ,share_emb=_lowerCamelCase ,use_bert_emb=_lowerCamelCase ,encoder="bert" ,max_pos=512 ,enc_layers=6 ,enc_hidden_size=512 ,enc_heads=8 ,enc_ff_size=512 ,enc_dropout=0.2 ,dec_layers=6 ,dec_hidden_size=768 ,dec_heads=8 ,dec_ff_size=2048 ,dec_dropout=0.2 ,)
snake_case_ : Tuple = torch.load(_lowerCamelCase ,lambda __magic_name__ ,__magic_name__ : storage )
snake_case_ : int = AbsSummarizer(_lowerCamelCase ,torch.device("cpu" ) ,_lowerCamelCase )
original.eval()
snake_case_ : Any = BertAbsSummarizer(_lowerCamelCase ,torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
snake_case_ : Any = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
snake_case_ : Tuple = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_lowerCamelCase )) )
snake_case_ : Optional[Any] = torch.tensor(_lowerCamelCase ).unsqueeze(0 )
snake_case_ : Optional[Any] = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_lowerCamelCase )) )
snake_case_ : Optional[int] = torch.tensor(_lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
snake_case_ : Any = encoder_input_ids
snake_case_ : Union[str, Any] = decoder_input_ids
snake_case_ : Tuple = None
snake_case_ : Optional[int] = None
snake_case_ : int = None
snake_case_ : Any = None
snake_case_ : Optional[int] = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
snake_case_ : List[str] = original(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )[0]
snake_case_ : Dict = original.generator(_lowerCamelCase )
snake_case_ : str = new_model(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )[0]
snake_case_ : Optional[int] = new_model.generator(_lowerCamelCase )
snake_case_ : Optional[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_lowerCamelCase ) )
snake_case_ : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_lowerCamelCase ) )
snake_case_ : Any = torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() ,"./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
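# The saved state dict could later be restored into a fresh model roughly like
# this (a sketch; the config arguments and path are placeholders):
#   model = BertAbsSummarizer(BertAbsConfig(...), torch.device("cpu"))
#   model.load_state_dict(torch.load(".../pytorch_model.bin"))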
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
    help='''Path to the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
__lowerCamelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 706 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
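# Illustrative example of the transposition described above: references supplied per prediction,
# e.g. [["ref1a", "ref1b"], ["ref2a", "ref2b"]], are regrouped into one stream per reference index,
# i.e. [["ref1a", "ref2a"], ["ref1b", "ref2b"]], before being handed to sacrebleu's corpus_score.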
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , predictions :Any , references :Any , normalized :bool = False , ignore_punct :bool = False , support_zh_ja_chars :bool = False , case_sensitive :bool = False , ) -> Optional[Any]:
'''simple docstring'''
references_per_prediction = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
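# Note the flag-name mapping to sacrebleu's TER: `ignore_punct` corresponds to `no_punct`
# and `support_zh_ja_chars` to `asian_support` (per the argument descriptions above).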
sb_ter = TER(
normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
output = sb_ter.corpus_score(predictions , transformed_references )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}