| column | type | values |
|---|---|---|
| code | string | lengths 82 – 54.1k |
| code_codestyle | int64 | 0 – 699 |
| style_context | string | lengths 111 – 35.6k |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert every pixel of the image to its color negative."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
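A quick sanity check of the quickselect routine above (my addition, not part of the original file; it assumes distinct elements, since duplicates of the pivot are dropped by the partition):

```python
print(kth_number([2, 1, 3, 4, 5], 3))  # 3 (the 3rd smallest)
print(kth_number([10, -3, 7], 1))      # -3 (the minimum)
```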
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCAmelCase__ = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ):
"""simple docstring"""
# Initialise PyTorch model
_UpperCAmelCase = XLNetConfig.from_json_file(a_ )
_UpperCAmelCase = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
_UpperCAmelCase = finetuning_task
_UpperCAmelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCAmelCase = XLNetForSequenceClassification(a_ )
elif "squad" in finetuning_task:
_UpperCAmelCase = finetuning_task
_UpperCAmelCase = XLNetForQuestionAnswering(a_ )
else:
_UpperCAmelCase = XLNetLMHeadModel(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a_ ,a_ ,a_ )
# Save pytorch-model
_UpperCAmelCase = os.path.join(a_ ,a_ )
_UpperCAmelCase = os.path.join(a_ ,a_ )
print(f'''Save PyTorch model to {os.path.abspath(a_ )}''' )
torch.save(model.state_dict() ,a_ )
print(f'''Save configuration file to {os.path.abspath(a_ )}''' )
with open(a_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
UpperCAmelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
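The converter can also be driven directly from Python instead of the CLI. The checkpoint paths below are hypothetical placeholders for wherever your TF checkpoint actually lives:

```python
# Hypothetical paths — substitute your own checkpoint/config locations
convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt",
    xlnet_config_file="xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
    pytorch_dump_folder_path="converted_xlnet",
    finetuning_task="sts-b",  # builds XLNetForSequenceClassification with num_labels=1
)
```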
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def __A ( a_ :Union[str, Any] , a_ :Dict) -> Union[str, Any]:
__a : Optional[int] = np.argmax(a_ , axis=1)
return np.sum(outputs == labels)
def __A ( a_ :Any) -> str:
with open(a_ , encoding='''utf_8''') as f:
__a : List[Any] = csv.reader(a_)
__a : List[str] = []
next(a_) # skip the first line
for line in tqdm(a_):
output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
__a : List[str] = []
for dataset in encoded_datasets:
__a : List[str] = len(a_)
__a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
__a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
__a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
__a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
for (
i,
(story, conta, conta, mc_label),
) in enumerate(a_):
__a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = with_conta
__a : int = with_conta
__a : List[str] = len(a_) - 1
__a : int = len(a_) - 1
__a : Optional[int] = with_conta
__a : Tuple = with_conta
__a : List[Any] = mc_label
__a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
return tensor_datasets
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
def tokenize_and_encode(a_ :List[Any]):
if isinstance(a_ , a_):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a_))
elif isinstance(a_ , a_):
return obj
return [tokenize_and_encode(a_) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the system a1*x + b1*y = c1 and a2*x + b2*y = c2, where each
    equation is given as [a, b, c], using Cramer's rule.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: both lines pass through the origin
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
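A worked example (my addition, not from the original file): the lines 11x + 2y = 30 and x = 4 intersect at (4, -7).

```python
print(cramers_rule_2x2([11, 2, 30], [1, 0, 4]))  # (4.0, -7.0)
```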
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
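`dep_version_check` is meant for optional dependencies that are verified lazily at the call site rather than at import time. A minimal sketch of how a caller might use it (my example, not from the original module):

```python
# check an optional dependency only when the feature that needs it is used
dep_version_check("tokenizers", hint="pip install tokenizers")
```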
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
def is_even(number: int) -> bool:
    """Return True if the given number is even, by testing its lowest bit."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
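The bit trick also works for negative integers under Python's two's-complement semantics (my examples, not part of the original file):

```python
print(is_even(4))   # True
print(is_even(-7))  # False  (-7 & 1 == 1)
print(is_even(-2))  # True   (-2 & 1 == 0)
```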
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
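The `alternative_union` flag swaps the denominator for |A| + |B|, which double-counts shared elements; with the same sets it yields 3/11 instead of 3/8 (my example):

```python
set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 ≈ 0.273
```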
"""simple docstring"""
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
UpperCAmelCase : Optional[int] = gray_code_sequence_string(a_ )
#
# convert them to integers
for i in range(len(a_ ) ):
UpperCAmelCase : Tuple = int(sequence[i] , 2 )
return sequence
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
UpperCAmelCase : str = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
UpperCAmelCase : List[Any] = gray_code_sequence_string(bit_count - 1 )
UpperCAmelCase : Any = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
UpperCAmelCase : str = '''0''' + smaller_sequence[i]
sequence.append(a_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
UpperCAmelCase : List[Any] = '''1''' + smaller_sequence[i]
sequence.append(a_ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
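For example (my addition), consecutive codes differ in exactly one bit:

```python
print(gray_code(2))  # [0, 1, 3, 2]  ->  00, 01, 11, 10
print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4]
```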
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : Tuple = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
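Since the Project Euler network file may not be at hand, a tiny in-memory graph (my example) shows the same saving computation: the full graph weighs 17, its minimum spanning tree 8, so 9 is saved.

```python
graph = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 9})
mst = graph.prims_algorithm()
print(sum(graph.edges.values()) - sum(mst.edges.values()))  # 17 - 8 = 9
```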
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory used to instantiate the command from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT dataset with the `datasets` package and save it as
    {split}.source / {split}.target text files under save_dir."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
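Called from Python rather than via fire, the defaults fetch WMT16 Romanian-English into ./wmt16-ro-en:

```python
download_wmt_dataset("ro", "en", dataset="wmt16")  # writes train/val/test .source and .target files
```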
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''') == 1
__a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1
__a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple
__a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__a : Any = logits[0, masked_index, :]
__a : Any = logits.softmax(dim=0)
__a , __a : Optional[Any] = prob.topk(k=a_ , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(a_) , a_),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(a_ , a_),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
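Each result tuple is (filled sentence, probability, predicted token), so the top-k predictions can be unpacked directly (my formatting, not part of the original script):

```python
for sentence, prob, token in fill_mask(masked_input, model, tokenizer, topk=3):
    print(f"{prob:.3f}  {sentence}")
```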
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
self.assertEqual(kp.calc_profit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 210 )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(
_UpperCAmelCase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
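The validator above accepts exactly two `rope_scaling` fields; a configuration that doubles the usable context via linear RoPE scaling would look like this (my example):

```python
config = LlamaConfig(
    max_position_embeddings=4096,
    rope_scaling={"type": "linear", "factor": 2.0},  # passes _rope_scaling_validation
)
```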
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list[int]) -> int:
if not nums:
return 0
__a : Any = nums[0]
__a : Optional[Any] = 0
for num in nums[1:]:
__a , __a : Optional[Any] = (
max_excluding + num,
max(a_ , a_),
)
return max(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
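A quick check (my example): from [1, 2, 4, 5, 3] the best non-adjacent picks are 1, 4 and 3.

```python
print(maximum_non_adjacent_sum([1, 2, 4, 5, 3]))  # 8  (1 + 4 + 3)
print(maximum_non_adjacent_sum([-1, -2, -3]))     # 0  (take nothing)
```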
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( _UpperCamelCase ):
def __init__( self : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 277 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = '''▁'''
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 0 |
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    """Return a uniformly random element of lst."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
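    # Hedged usage sketch (illustrative): k counts from 1, so k=1 is the
    # minimum and k=len(lst) the maximum. Elements must be distinct, since
    # values equal to the pivot are dropped by the partition above.
    assert kth_number([5, 3, 1, 4, 2], 1) == 1
    assert kth_number([5, 3, 1, 4, 2], 3) == 3
    assert kth_number([5, 3, 1, 4, 2], 5) == 5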
| 220 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
| 52 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __SCREAMING_SNAKE_CASE ( a__ : bytes ,a__ : int ) -> np.array:
__A : str = f"""{sampling_rate}"""
__A : Tuple = '''1'''
__A : Optional[int] = '''f32le'''
__A : Union[str, Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a_ ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
__A : List[str] = ffmpeg_process.communicate(a_ )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
__A : List[Any] = output_stream[0]
__A : Union[str, Any] = np.frombuffer(a_ ,np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : float ,a__ : str = "f32le" ,) -> Tuple:
__A : int = f"""{sampling_rate}"""
__A : List[Any] = '''1'''
if format_for_conversion == "s16le":
__A : Dict = 2
elif format_for_conversion == "f32le":
__A : List[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
__A : List[Any] = platform.system()
if system == "Linux":
__A : List[Any] = '''alsa'''
__A : str = '''default'''
elif system == "Darwin":
__A : List[str] = '''avfoundation'''
__A : List[str] = ''':0'''
elif system == "Windows":
__A : Any = '''dshow'''
__A : List[str] = '''default'''
__A : Dict = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__A : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__A : Optional[int] = _ffmpeg_stream(a_ ,a_ )
for item in iterator:
yield item
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : float ,a__ : Optional[int] = None ,a__ : Optional[Union[Tuple[float, float], float]] = None ,a__ : str = "f32le" ,) -> Optional[Any]:
if stream_chunk_s is not None:
__A : Optional[int] = stream_chunk_s
else:
__A : Optional[Any] = chunk_length_s
__A : Union[str, Any] = ffmpeg_microphone(a_ ,a_ ,format_for_conversion=a_ )
if format_for_conversion == "s16le":
__A : Tuple = np.intaa
__A : Optional[int] = 2
elif format_for_conversion == "f32le":
__A : str = np.floataa
__A : int = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
__A : Tuple = chunk_length_s / 6
__A : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a_ ,(int, float) ):
__A : Optional[int] = [stride_length_s, stride_length_s]
__A : List[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__A : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__A : Dict = datetime.datetime.now()
__A : List[Any] = datetime.timedelta(seconds=a_ )
for item in chunk_bytes_iter(a_ ,a_ ,stride=(stride_left, stride_right) ,stream=a_ ):
# Put everything back in numpy scale
__A : int = np.frombuffer(item["""raw"""] ,dtype=a_ )
__A : str = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__A : List[str] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __SCREAMING_SNAKE_CASE ( a__ : List[Any] ,a__ : int ,a__ : Tuple[int, int] ,a__ : bool = False ) -> List[Any]:
__A : Dict = b''''''
__A : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
__A : Any = 0
for raw in iterator:
acc += raw
if stream and len(a_ ) < chunk_len:
__A : Optional[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a_ ) >= chunk_len:
# We are flushing the accumulator
__A : Tuple = (_stride_left, stride_right)
__A : Optional[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__A : Any = False
yield item
__A : Dict = stride_left
__A : Dict = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a_ ) > stride_left:
__A : Union[str, Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__A : Tuple = False
yield item
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : int ) -> List[str]:
__A : List[Any] = 2**24 # 16Mo
try:
with subprocess.Popen(a_ ,stdout=subprocess.PIPE ,bufsize=a_ ) as ffmpeg_process:
while True:
__A : Any = ffmpeg_process.stdout.read(a_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 17 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
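# Hedged usage sketch (illustrative, mirroring the full-loop tests above): the
# canonical DDPM sampling loop outside the test harness; `model` is assumed to
# be any noise-prediction UNet.
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample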
| 52 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
lowerCAmelCase__ : Tuple = vae_state_dict['''encoder.conv_in.weight''']
lowerCAmelCase__ : Tuple = vae_state_dict['''encoder.conv_in.bias''']
lowerCAmelCase__ : List[str] = vae_state_dict['''encoder.conv_out.weight''']
lowerCAmelCase__ : int = vae_state_dict['''encoder.conv_out.bias''']
lowerCAmelCase__ : List[str] = vae_state_dict['''encoder.norm_out.weight''']
lowerCAmelCase__ : str = vae_state_dict['''encoder.norm_out.bias''']
lowerCAmelCase__ : Optional[int] = vae_state_dict['''decoder.conv_in.weight''']
lowerCAmelCase__ : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
lowerCAmelCase__ : str = vae_state_dict['''decoder.conv_out.weight''']
lowerCAmelCase__ : int = vae_state_dict['''decoder.conv_out.bias''']
lowerCAmelCase__ : Dict = vae_state_dict['''decoder.norm_out.weight''']
lowerCAmelCase__ : List[str] = vae_state_dict['''decoder.norm_out.bias''']
lowerCAmelCase__ : List[Any] = vae_state_dict['''quant_conv.weight''']
lowerCAmelCase__ : Tuple = vae_state_dict['''quant_conv.bias''']
lowerCAmelCase__ : int = vae_state_dict['''post_quant_conv.weight''']
lowerCAmelCase__ : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase__ : Tuple = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
lowerCAmelCase__ : Optional[int] = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(a_ )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase__ : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
lowerCAmelCase__ : List[Any] = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(a_ )
}
for i in range(a_ ):
lowerCAmelCase__ : Dict = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
lowerCAmelCase__ : List[Any] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
lowerCAmelCase__ : Any = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
lowerCAmelCase__ : List[str] = renew_vae_resnet_paths(a_ )
lowerCAmelCase__ : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(a_ ,a_ ,a_ ,additional_replacements=[meta_path] ,config=a_ )
lowerCAmelCase__ : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
lowerCAmelCase__ : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
lowerCAmelCase__ : List[str] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
lowerCAmelCase__ : Union[str, Any] = renew_vae_resnet_paths(a_ )
lowerCAmelCase__ : str = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(a_ ,a_ ,a_ ,additional_replacements=[meta_path] ,config=a_ )
lowerCAmelCase__ : List[Any] = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
lowerCAmelCase__ : List[str] = renew_vae_attention_paths(a_ )
lowerCAmelCase__ : Optional[Any] = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a_ ,a_ ,a_ ,additional_replacements=[meta_path] ,config=a_ )
conv_attn_to_linear(a_ )
for i in range(a_ ):
lowerCAmelCase__ : str = num_up_blocks - 1 - i
lowerCAmelCase__ : Any = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
lowerCAmelCase__ : Optional[int] = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
lowerCAmelCase__ : List[str] = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
lowerCAmelCase__ : Union[str, Any] = renew_vae_resnet_paths(a_ )
lowerCAmelCase__ : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(a_ ,a_ ,a_ ,additional_replacements=[meta_path] ,config=a_ )
lowerCAmelCase__ : str = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
lowerCAmelCase__ : List[str] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
lowerCAmelCase__ : Union[str, Any] = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
lowerCAmelCase__ : Optional[Any] = renew_vae_resnet_paths(a_ )
lowerCAmelCase__ : List[str] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(a_ ,a_ ,a_ ,additional_replacements=[meta_path] ,config=a_ )
lowerCAmelCase__ : Any = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
lowerCAmelCase__ : Any = renew_vae_attention_paths(a_ )
lowerCAmelCase__ : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a_ ,a_ ,a_ ,additional_replacements=[meta_path] ,config=a_ )
conv_attn_to_linear(a_ )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
# Only support V1
lowerCAmelCase__ : List[Any] = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
lowerCAmelCase__ : Tuple = io.BytesIO(r.content )
lowerCAmelCase__ : Tuple = OmegaConf.load(a_ )
lowerCAmelCase__ : Union[str, Any] = 512
lowerCAmelCase__ : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
lowerCAmelCase__ : Tuple = {}
with safe_open(a_ ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
lowerCAmelCase__ : str = f.get_tensor(a_ )
else:
lowerCAmelCase__ : List[str] = torch.load(a_ ,map_location=a_ )['''state_dict''']
# Convert the VAE model.
lowerCAmelCase__ : int = create_vae_diffusers_config(a_ ,image_size=a_ )
lowerCAmelCase__ : Tuple = custom_convert_ldm_vae_checkpoint(a_ ,a_ )
lowerCAmelCase__ : Optional[int] = AutoencoderKL(**a_ )
vae.load_state_dict(a_ )
vae.save_pretrained(a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE to.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
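# Hedged usage sketch (the script filename is an assumption; the two flags are
# the ones registered above):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.ckpt \
#       --dump_path ./vae_diffusers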
| 233 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A = random.Random()
def __A ( a_ :Tuple , a_ :Dict=1.0 , a_ :str=None , a_ :List[Any]=None) -> Dict:
if rng is None:
__a : Any = global_rng
__a : Tuple = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 0 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings, increment the count for that
    # character in the first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
snake_case_ : Optional[Any] = input("""Enter the first string """).strip()
snake_case_ : Optional[Any] = input("""Enter the second string """).strip()
snake_case_ : Optional[Any] = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
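    # Hedged worked examples (illustrative sanity checks): the per-character
    # counts cancel out exactly when the two strings are anagrams.
    assert check_anagrams("Silent", "Listen")
    assert check_anagrams("This is a string", "Is this a string")
    assert not check_anagrams("There", "Their")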
| 595 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of char in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the rightmost mismatching text index for the window starting
        at current_pos, or -1 if the whole window matches the pattern."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
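# Hedged worked example (illustrative, values traced by hand): for the inputs
# above the heuristic reports [0, 3], the two offsets where "AB" occurs in
# "ABAABA". On a mismatch the window can jump by mismatch_index - match_index
# instead of sliding one position at a time.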
| 52 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase = {
"""google/rembert""": 2_56,
}
lowerCAmelCase = """▁"""
class lowerCamelCase ( _UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = RemBertTokenizer
def __init__( self , a_=None , a_=None , a_=True , a_=True , a_=False , a_="[CLS]" , a_="[SEP]" , a_="<unk>" , a_="[SEP]" , a_="<pad>" , a_="[CLS]" , a_="[MASK]" , **a_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
lowerCAmelCase : Tuple = do_lower_case
lowerCAmelCase : Tuple = remove_space
lowerCAmelCase : Tuple = keep_accents
lowerCAmelCase : str = vocab_file
lowerCAmelCase : Optional[Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self , a_ , a_ = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
def _lowerCamelCase ( self , a_ , a_ = None ):
lowerCAmelCase : List[str] = [self.sep_token_id]
lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(_UpperCAmelCase ) )
return
lowerCAmelCase : int = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
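# Hedged usage sketch (not part of the original file; assumes the class above is
# exported as RemBertTokenizerFast and that the Hub checkpoint is reachable):
#
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   encoded = tokenizer("Hello world")
#   # encoded.input_ids starts with the [CLS] id and ends with the [SEP] id,
#   # exactly as built by build_inputs_with_special_tokens above.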
| 525 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 52 | 0 |
'''simple docstring'''
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Initialise the graph and remember the BFS source vertex."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Explore the graph level by level, filling in self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to target_vertex as 'A->B->C'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
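    # Hedged illustration (traced by hand from the adjacency list above with
    # source "G"): BFS discovers C first, then A and F, then B and E, then D,
    # so the calls above print "G->C->A->B->D" and "G" before the "Foo" call
    # raises ValueError, since that vertex is unknown to the parent map.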
| 660 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 52 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.dummy_uncond_unet
lowerCamelCase_ : Union[str, Any] = ScoreSdeVeScheduler()
lowerCamelCase_ : Dict = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
sde_ve.to(_UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=_UpperCAmelCase ).images
lowerCamelCase_ : Optional[int] = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=_UpperCAmelCase , return_dict=_UpperCAmelCase )[
0
]
lowerCamelCase_ : int = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = '''google/ncsnpp-church-256'''
lowerCamelCase_ : List[Any] = UNetaDModel.from_pretrained(_UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = ScoreSdeVeScheduler.from_pretrained(_UpperCAmelCase )
lowerCamelCase_ : Any = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
sde_ve.to(_UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCamelCase_ : List[str] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=_UpperCAmelCase ).images
lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase_ : List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 250 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ['a', 'b', 'c']
# Defaults to last layer if both are None
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ['c'] )
self.assertEqual(lowerCAmelCase_ , [2] )
# Out indices set to match out features
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(['a', 'c'] , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ['a', 'c'] )
self.assertEqual(lowerCAmelCase_ , [0, 2] )
# Out features set to match out indices
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(lowerCAmelCase_ , [0, 2] , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ['a', 'c'] )
self.assertEqual(lowerCAmelCase_ , [0, 2] )
# Out features selected from negative indices
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(lowerCAmelCase_ , [-3, -1] , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ['a', 'c'] )
self.assertEqual(lowerCAmelCase_ , [-3, -1] )
def lowercase ( self : List[Any] ) -> Dict:
# Stage names must be set
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , lowerCAmelCase_ )
# Out features must be a list
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(lowerCAmelCase_ , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(lowerCAmelCase_ , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(lowerCAmelCase_ ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = BackboneMixin()
__lowerCAmelCase = ['a', 'b', 'c']
__lowerCAmelCase = ['a', 'c']
__lowerCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
__lowerCAmelCase = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
__lowerCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 53 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case : Optional[int] = logging.getLogger(__name__)
_snake_case : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
    def __post_init__( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a_ = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset : Dataset, ref_file : str ):
    with open(ref_file, 'r', encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    # The reference file must provide exactly one whole-word-mask entry per example.
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=F"""train[:{data_args.validation_split_percentage}%]""", )
            datasets['train'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=F"""train[{data_args.validation_split_percentage}%:]""", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.config_overrides is not None:
        logger.info(F"""Overriding config: {model_args.config_overrides}""" )
        config.update_from_string(model_args.config_overrides )
        logger.info(F"""New config: {config}""" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines so the collator never sees blank inputs.
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'] if training_args.do_train else None, eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w' ) as writer:
                logger.info('***** Train results *****' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F"""  {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(F"""  {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn( index : int ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
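# Example invocation (illustrative only; the script filename is hypothetical, the
# flags come from the dataclasses defined above):
#   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file train_ref.txt \
#       --do_train --output_dir ./out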
| 53 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 53 |
def solution( n : int = 200_0000 ):
    # Sieve of Eratosthenes: 0 marks "still possibly prime", 1 marks "composite".
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            # Strike out every multiple of i, stepping by i.
            for j in range(i * i, n + 1, i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config( model_name : str ):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )
    return config
def rename_key( name : str ):
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks', 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
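# Worked examples of the renaming rules above (derived from the function, shown
# here for illustration only):
#   "stem.conv.weight"        -> "bit.embedder.convolution.weight"
#   "blocks.0.0.conv1.weight" -> "bit.encoder.layers.0.0.conv1.weight"
#   "head.fc.bias"            -> "classifier.1.bias"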
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name : str, pytorch_dump_folder_path : str, push_to_hub : bool = False ):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model and rename its keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image, return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('Logits:', logits[0, :3] )
    print('Predicted class:', model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model {model_name} and processor to the hub""" )
        model.push_to_hub(F"""ybelkada/{model_name}""" )
        processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
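# Example invocation (illustrative; the script filename is hypothetical):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub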
| 53 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests( TestCasePlus ):
"""simple docstring"""
    def _create_dummy_data( self , data_dir : str ) -> None:
        os.makedirs(data_dir , exist_ok=True )
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 1_2, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , 'w' ) as f:
                    f.write(content )
    def _run_finetune( self , gpus : int , distributed_retriever : str = "pytorch" ) -> dict:
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , 'output' )
        data_dir = os.path.join(tmp_dir , 'data' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , 'metrics.json' )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
@require_torch_gpu
def lowercase ( self : str ) -> int:
        result = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def lowercase ( self : List[str] ) -> Dict:
        result = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def lowercase ( self : int ) -> Tuple:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowercase ( self : List[Any] ) -> str:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
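    # The metrics.json read in _run_finetune is expected to look roughly like
    # {"test": [{"test_avg_em": 0.25, ...}], ...} (illustrative shape), which is
    # why the assertions above index result['test'][0]['test_avg_em'].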
| 53 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji( vocab_file : str, emoji_file : str ):
    with open(emoji_file, 'r', encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
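# Illustrative vocab.txt layout assumed by the loader above: one surface form per
# line, or several comma-separated forms that all share one token id, e.g. a line
# "こんにちは,コンニチハ" maps both spellings to the id of that line.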
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs ) -> None:
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
"""simple docstring"""
    def __init__( self , vocab , ids_to_tokens , emoji ) -> None:
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Union[str, Any] ) -> int:
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(' ' , '<SP>' )
        text = text.replace('　' , '<SP>' )
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc2_a1 and c <= 0Xc2_bf)
                    or (c >= 0Xc7_80 and c <= 0Xc7_83)
                    or (c >= 0Xca_b9 and c <= 0Xcb_bf)
                    or (c >= 0Xcc_80 and c <= 0Xcd_a2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
        text = ''.join(words )
        return text
| 53 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
"""simple docstring"""
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=3_2 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> None:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def lowercase ( self : Dict ) -> List[str]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def lowercase ( self : List[str] ) -> Tuple:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def lowercase ( self : str ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Any ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def lowercase ( self : Dict ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Any ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Tuple ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def lowercase ( self : int ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def lowercase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def lowercase ( self : Dict ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : List[str] ) -> Optional[Any]:
pass
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase ( self : int ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
def lowercase ( self : Dict ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 53 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
def lowercase ( self : Any ) -> Union[str, Any]:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
        expected_slice = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def lowercase ( self : List[Any] ) -> Tuple:
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase ( self : List[str] ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : str ) -> List[str]:
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )
        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        class_ids = pipe.get_label_ids(words )
        images = pipe(class_ids , generator=generator , num_inference_steps=4_0 , output_type='np' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self : int ) -> int:
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )
        words = ['vase', 'umbrella']
        class_ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(class_ids , generator=generator , num_inference_steps=2_5 , output_type='np' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
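    # Illustrative end-to-end usage mirroring the slow tests above (a sketch; it
    # requires a GPU and the pretrained weights):
    #   pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
    #   class_ids = pipe.get_label_ids(['vase'])
    #   images = pipe(class_ids, num_inference_steps=4_0, output_type='np').images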
| 53 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('env' )
    else:
        parser = argparse.ArgumentParser('Accelerate env command' )
    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
        'PyTorch XPU available': str(pt_xpu_available ),
        'PyTorch NPU available': str(pt_npu_available ),
        'System RAM': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n' )
    print('\n'.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
    accelerate_config_str = (
        '\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config, dict )
        else F"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info['`Accelerate` configs'] = accelerate_config
return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
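# Typical invocations (illustrative): `accelerate env` or
# `accelerate env --config_file path/to/config.yaml`.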
if __name__ == "__main__":
raise SystemExit(main())
| 53 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone( PreTrainedModel , BackboneMixin ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
        self.config = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
        pretrained = getattr(config , 'use_pretrained_backbone' , None )
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , 'out_indices' , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ['vision', 'timm'] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop('config' , TimmBackboneConfig() )
        use_timm = kwargs.pop('use_timm_backbone' , True )
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones' )
        num_channels = kwargs.pop('num_channels' , config.num_channels )
        features_only = kwargs.pop('features_only' , config.features_only )
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('out_indices' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ) -> None:
        # Empty init weights function, kept for compatibility with the library.
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment' )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
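    # Illustrative usage of the class above (a sketch; assumes timm and the
    # pretrained weights are available):
    #   backbone = TimmBackbone.from_pretrained('resnet50', out_indices=(1, 2, 3))
    #   outputs = backbone(pixel_values)  # outputs.feature_maps is a tuple of Tensors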
| 53 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
        'training_script', type=str, help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
), )
# rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the spawned processes see the right arguments.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
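# Example invocation (illustrative script and flags):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# Everything after the training script path is forwarded to it through sys.argv.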
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
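# _LazyModule defers the torch/sentencepiece imports declared above until the
# corresponding attribute is first accessed, so importing this package stays cheap.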
| 53 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , type_sequence_label_size=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ):
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def lowercase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def lowercase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowercase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowercase ( self : str ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : Tuple ) -> List[Any]:
pass
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Zero the initializer ranges so that properly (re-)initialized parameters
        # collapse to 0.0 (or exactly 1.0, e.g. layernorm weights) in the check below.
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def lowercase ( self : Any ) -> int:
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath ).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    def test_inference_swin_backbone( self ):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
    def test_inference_convnext_backbone( self ):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features ):
    assert isinstance(dataset, Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
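# Each test below loads a plain-text fixture (one dataset row per line) through
# TextDatasetReader and funnels the result into the shared assertions above.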
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_dataset_from_text_features(features, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split(split, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split ).read()
    _check_text_dataset(dataset, expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path ):
    if issubclass(path_type, str ):
        path = text_path
    elif issubclass(path_type, list ):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int, lowerCAmelCase_ : Tuple=("train",) ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_datasetdict_from_text_features(features, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    # The text builder exposes a single "text" column whose default dtype is string
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split(split, text_path, tmp_path ):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
| 53 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 1_2_8
        train_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        train_dataset = train_dataset.select(range(3_2 ) )
        val_dataset = val_dataset.select(range(1_6 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'] , padding='max_length' , truncation=True , max_length=5_1_2 )
            outputs = tokenizer(batch['highlights'] , padding='max_length' , truncation=True , max_length=1_2_8 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x ) == 5_1_2 for x in inputs.input_ids )
            assert all(len(x ) == 1_2_8 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='steps' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 53 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
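# The (old, new) pairs collected above are applied to the checkpoint's state dict
# via rename_key() during conversion below.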
def read_in_q_k_v(state_dict, config, base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True ):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
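# Example invocation (script filename and output path are illustrative):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16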
| 53 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    train_data_file: Optional[str] = field(
        default=None , metadata={"help": "The input training data file (a text file)."} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    line_by_line: bool = field(
        default=False , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    mlm: bool = field(
        default=False , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    whole_word_mask: bool = field(default=False , metadata={"help": "Whether or not to use whole word mask."} )
    mlm_probability: float = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    block_size: int = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    def _dataset(file_path, ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file, args.train_ref_file )
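# main() below follows the standard Trainer recipe: parse the three argument
# dataclasses, build config/tokenizer/model, pick an LM data collator, train, evaluate.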
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # XLNet is trained with permutation language modeling, so it needs its own collator;
    # all other models use (whole-word-)masked or causal LM collation.
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt' )
        if trainer.is_world_master():
            with open(output_eval_file, 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key in sorted(result.keys() ):
                    logger.info(' %s = %s', key, str(result[key] ) )
                    writer.write('%s = %s\n' % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 53 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
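# As exercised by the tests below, is_safetensors_compatible should return True only
# when every torch `.bin` weight file in the listing has a matching `.safetensors`
# counterpart (respecting the variant suffix such as `.fp16` when one is passed).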
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Union[str, Any] ) -> List[str]:
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
def lowercase ( self : str ) -> Any:
        filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
def lowercase ( self : Tuple ) -> Optional[int]:
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
def lowercase ( self : List[Any] ) -> List[str]:
        filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
def lowercase ( self : List[Any] ) -> int:
        filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
def lowercase ( self : str ) -> str:
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def lowercase ( self : str ) -> List[Any]:
        filenames = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def lowercase ( self : str ) -> List[str]:
# pass variant but use the non-variant filenames
        filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def lowercase ( self : str ) -> Union[str, Any]:
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
def lowercase ( self : str ) -> List[Any]:
        filenames = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def lowercase ( self : List[str] ) -> List[Any]:
# pass variant but use the non-variant filenames
        filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
        filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
| 53 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
"""simple docstring"""
    def put( self , value ):
        raise NotImplementedError()
    def end( self ):
        raise NotImplementedError()
class TextStreamer( BaseStreamer ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : "AutoTokenizer" , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = tokenizer
__lowerCAmelCase = skip_prompt
__lowerCAmelCase = decode_kwargs
# variables used in the streaming process
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = True
    def put( self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('TextStreamer only supports batch size 1' )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('\n' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(' ' ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text( self , text: str , stream_end: bool = False ):
        print(text , flush=True , end='' if not stream_end else None )
    def _is_chinese_char( self , cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class TextIteratorStreamer( TextStreamer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase_ : "AutoTokenizer" , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[float] = None , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = Queue()
__lowerCAmelCase = None
__lowerCAmelCase = timeout
    def on_finalized_text( self , text: str , stream_end: bool = False ):
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Tuple ) -> str:
return self
    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
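# Minimal usage sketch (names are illustrative; assumes a causal LM and its tokenizer):
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer)).start()
#   for chunk in streamer:
#       print(chunk, end='')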
| 53 |
import math
def jump_search(arr: list, x: int ) -> int:
    # Jump search: probe the sorted array in blocks of size ~sqrt(n), then scan
    # linearly inside the block that may contain x. Runs in O(sqrt(n)) comparisons.
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step, n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
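# e.g. jump_search([0, 1, 3, 5, 8, 13], 5) -> 3 and jump_search([0, 1, 3], 7) -> -1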
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
| 53 | 1 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_09
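# The hypothesis is linear: h(x) = p0 + p1*x1 + p2*x2 + p3*x3 over parameter_vector p,
# and each step applies the batch update p_j <- p_j - LEARNING_RATE * (1/m) * sum_i (h(x_i) - y_i) * x_ij.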
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Optional[Any]="train" ):
return calculate_hypothesis_value(lowerCAmelCase_, lowerCAmelCase_ ) - output(
lowerCAmelCase_, lowerCAmelCase_ )
def _hypothesis_value(data_input_tuple ):
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative(index, end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index ):
    cost_derivative_value = summation_of_cost_derivative(index, m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_0002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        # Stop once successive parameter vectors are numerically indistinguishable.
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(('Actual output value:', output(i, 'test' )) )
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 53 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('Building PyTorch model from configuration: {}'.format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path )
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path ) )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
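# Example invocation (paths are illustrative):
#   python convert_rembert_tf_checkpoint_to_pytorch.py --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json --pytorch_dump_path ./pytorch_model.bin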
| 53 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Tuple = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
a_ = """resnet"""
a_ = ["""basic""", """bottleneck"""]
    def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
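        # With the defaults above (bottleneck blocks, depths [3, 4, 6, 3], hidden
        # sizes up to 2048) this configuration matches a standard ResNet-50.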
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = version.parse("""1.11""" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase ( self : str ) -> float:
return 1e-3
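# Minimal usage sketch (names as restored above; defaults mirror ResNet-50):
#   config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic")
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']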
| 53 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def get_maskformer_config( model_name : str ):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    config.id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key( dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
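# rename_key simply moves a tensor to a new key in place, e.g. (toy values):
#   sd = {'backbone.patch_embed.proj.weight': w}
#   rename_key(sd, 'backbone.patch_embed.proj.weight',
#              'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight')
# leaves `sd` holding the same tensor under the new (HF-style) key.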
def read_in_swin_q_k_v( state_dict, backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            # (target key names follow the HF Swin layout used by the rename table above)
            prefix = f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self"""
            state_dict[f"""{prefix}.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""{prefix}.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""{prefix}.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""{prefix}.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""{prefix}.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[f"""{prefix}.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v( state_dict, config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        prefix = f"""model.transformer_module.decoder.layers.{idx}.self_attn"""
        state_dict[f"""{prefix}.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""{prefix}.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""{prefix}.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""{prefix}.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""{prefix}.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""{prefix}.v_proj.bias"""] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        prefix = f"""model.transformer_module.decoder.layers.{idx}.encoder_attn"""
        state_dict[f"""{prefix}.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""{prefix}.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""{prefix}.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""{prefix}.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""{prefix}.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""{prefix}.v_proj.bias"""] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name : str, checkpoint_path : str, pytorch_dump_folder_path : str, push_to_hub : bool = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path, 'rb' ) as f:
        data = pickle.load(f )
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_swin_q_k_v(state_dict, config.backbone_config )
    read_in_decoder_q_k_v(state_dict, config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = 'ade' in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
    inputs = image_processor(image, return_tensors='pt' )
    outputs = model(**inputs )
    print('Logits:', outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and image processor to the hub...' )
        model.push_to_hub(f"""nielsr/{model_name}""" )
        image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
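    # Example invocation (hypothetical local checkpoint path; the script filename is illustrative):
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    #       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade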
| 53 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests( unittest.TestCase ):
    """Integration tests for the Flax Stable Diffusion 2 pipeline."""

    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax( self ):
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def test_stable_diffusion_dpm_flax( self ):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='scheduler' )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision='bf16' , dtype=jnp.bfloat16 , )
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
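# The replicate/shard pattern above is the standard Flax data-parallel recipe:
# `replicate` copies the pipeline parameters to every device, `shard` splits the
# batch across devices, and the jitted pipeline then runs once per device (pmap).
# A minimal, self-contained sketch of the same idea with a toy function:
#   import jax, jax.numpy as jnp
#   xs = jnp.arange(jax.device_count() * 4.0).reshape(jax.device_count(), 4)
#   ys = jax.pmap(lambda x: x * 2)(xs)  # one shard of `xs` per device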
| 53 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field( default=None, metadata=None ):
    return field(default_factory=lambda: default, metadata=metadata )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to fine-tune."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."} , )
    mask_time_prob: Optional[float] = field(
        default=0.05 , metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[',', '?', '.', '!', '-', ';', ':', '""', '%', "'", '"', '�'] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pads received inputs and labels for CTC training."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__( self , features : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch['labels'] = labels
        return batch
class CTCTrainer( Trainer ):
    """Trainer subclass that handles CTC loss reduction and mixed-precision backward passes."""

    def training_step( self , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
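# Why the division by gradient_accumulation_steps above: the backward passes of N
# micro-batches are summed before the optimizer step, so scaling each loss by 1/N
# keeps the effective gradient equal to one pass over the full batch. For example,
# with N = 4 micro-batches of loss 1.0 each: 4 * (1.0 / 4) = 1.0.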
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice', data_args.dataset_config_name, split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test' )
    # Create and save tokenizer
    chars_to_ignore_regex = f"""[{"".join(data_args.chars_to_ignore )}]"""
    def remove_special_characters(batch ):
        batch['text'] = re.sub(chars_to_ignore_regex, '', batch['sentence'] ).lower() + ' '
        return batch
    train_dataset = train_dataset.map(remove_special_characters, remove_columns=['sentence'] )
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=['sentence'] )
    def extract_all_chars(batch ):
        all_text = ' '.join(batch['text'] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )
    vocab_list = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict )
    vocab_dict['[PAD]'] = len(vocab_dict )
    with open('vocab.json', 'w' ) as vocab_file:
        json.dump(vocab_dict, vocab_file )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ), data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )
    resampler = torchaudio.transforms.Resample(48000, 16000 )
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch ):
        speech_array , sampling_rate = torchaudio.load(batch['path'] )
        batch['speech'] = resampler(speech_array ).squeeze().numpy()
        batch['sampling_rate'] = 16000
        batch['target_text'] = batch['text']
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'] ) ) == 1
        ), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
        processed_batch = processor(
            audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0] )
        batch.update(processed_batch )
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    # Metric
    wer_metric = datasets.load_metric('wer' )
    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1 )
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str, references=label_str )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True )
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset ) )
        trainer.log_metrics('train', metrics )
        trainer.save_metrics('train', metrics )
        trainer.save_state()
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset ) )
        trainer.log_metrics('eval', metrics )
        trainer.save_metrics('eval', metrics )
    return results
if __name__ == "__main__":
main()
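# Example launch (hypothetical paths/IDs; the script filename is illustrative, and
# --dataset_config_name picks the Common Voice language, e.g. "tr" for Turkish):
#   python run_wav2vec2_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-turkish \
#       --do_train --do_eval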
| 53 | 1 |
def price_plus_tax( price : float, tax_rate : float ) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(f"""{price_plus_tax(100, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 53 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
    """Fast RetriBERT tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
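# Minimal usage sketch (assumes the hub weights are still downloadable, or a local
# directory holding vocab.txt / tokenizer.json):
#   tokenizer = RetriBertTokenizerFast.from_pretrained('yjernite/retribert-base-uncased')
#   enc = tokenizer('a query', 'a passage')
#   print(enc['input_ids'], enc['attention_mask'])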
| 53 | 1 |
from __future__ import annotations
def comp_and_swap( array : list[int], index1 : int, index2 : int, direction : int ) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2], array[index1]
def bitonic_merge( array : list[int], low : int, length : int, direction : int ) -> None:
    if length > 1:
        middle = int(length / 2 )
        for i in range(low, low + middle ):
            comp_and_swap(array, i, i + middle, direction )
        bitonic_merge(array, low, middle, direction )
        bitonic_merge(array, low + middle, middle, direction )
def bitonic_sort( array : list[int], low : int, length : int, direction : int ) -> None:
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array, low, middle, 1 )
        bitonic_sort(array, low + middle, middle, 0 )
        bitonic_merge(array, low, length, direction )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
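# Note: bitonic sort only sorts sequences whose length is a power of two, since every
# merge splits the range exactly in half. For example:
#   data = [3, 1, 4, 1, 5, 9, 2, 6]      # len 8 == 2**3
#   bitonic_sort(data, 0, len(data), 1)  # -> [1, 1, 2, 3, 4, 5, 6, 9]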
| 53 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9 ).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray, laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3 ).any()
def test_sobel_filter():
    grad , theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20 )
    assert sepia.all()
def test_burkes(file_path : str = "digital_image_processing/image_data/lena_small.jpg" ):
    burkes = bs.Burkes(imread(file_path, 1 ), 120 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path : str = "digital_image_processing/image_data/lena_small.jpg" ):
    nn = rs.NearestNeighbour(imread(file_path, 1 ), 400, 200 )
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0] ):
        for j in range(0, image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j )
    assert lbp_image.any()
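# These are pytest-style tests; run them from the repository root with e.g.:
#   python -m pytest digital_image_processing/test_digital_image_processing.py -v
# (the path is the conventional location in TheAlgorithms/Python; adjust if it differs)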
| 53 | 1 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset, expected_features ):
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_text_keep_in_memory( keep_in_memory, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def test_dataset_from_text_features( features, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split( split, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split ).read()
    _check_text_dataset(dataset, expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def test_dataset_from_text_path_type( path_type, text_path, tmp_path ):
    if issubclass(path_type, str ):
        path = text_path
    elif issubclass(path_type, list ):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
def _check_text_datasetdict( dataset_dict, expected_features, splits=("train",) ):
    assert isinstance(dataset_dict, DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_datasetdict_from_text_keep_in_memory( keep_in_memory, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def test_datasetdict_from_text_features( features, text_path, tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split( split, text_path, tmp_path ):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
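# Minimal direct usage sketch outside pytest (assumes a newline-delimited text file;
# in the tests above, the `text_path` fixture provides such a file):
#   from datasets.io.text import TextDatasetReader
#   ds = TextDatasetReader('data.txt', cache_dir='./cache').read()
#   ds[0]  # {'text': '<first line of data.txt>'}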
| 53 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
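# --- Hedged usage sketch (not part of the processor above) -----------------------
# A minimal numpy-only illustration of the rescale -> normalize steps this
# preprocess pipeline applies; the 1/255 factor, mean and std below are
# illustrative assumptions, not this processor's actual defaults.
import numpy as np

def _demo_rescale_normalize(image, scale, mean, std):
    image = image.astype(np.float32) * scale  # rescale: multiply every pixel by the scale factor
    return (image - np.array(mean)) / np.array(std)  # normalize: per-channel (x - mean) / std, channels-last

_demo_out = _demo_rescale_normalize(
    np.zeros((224, 224, 3), dtype=np.uint8), 1 / 255, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
)
assert _demo_out.shape == (224, 224, 3)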
| 53 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
__lowerCAmelCase = vae_state_dict['encoder.conv_in.weight']
__lowerCAmelCase = vae_state_dict['encoder.conv_in.bias']
__lowerCAmelCase = vae_state_dict['encoder.conv_out.weight']
__lowerCAmelCase = vae_state_dict['encoder.conv_out.bias']
__lowerCAmelCase = vae_state_dict['encoder.norm_out.weight']
__lowerCAmelCase = vae_state_dict['encoder.norm_out.bias']
__lowerCAmelCase = vae_state_dict['decoder.conv_in.weight']
__lowerCAmelCase = vae_state_dict['decoder.conv_in.bias']
__lowerCAmelCase = vae_state_dict['decoder.conv_out.weight']
__lowerCAmelCase = vae_state_dict['decoder.conv_out.bias']
__lowerCAmelCase = vae_state_dict['decoder.norm_out.weight']
__lowerCAmelCase = vae_state_dict['decoder.norm_out.bias']
__lowerCAmelCase = vae_state_dict['quant_conv.weight']
__lowerCAmelCase = vae_state_dict['quant_conv.bias']
__lowerCAmelCase = vae_state_dict['post_quant_conv.weight']
__lowerCAmelCase = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
__lowerCAmelCase = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
__lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the decoder up blocks only
__lowerCAmelCase = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
__lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ )
}
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
__lowerCAmelCase = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
__lowerCAmelCase = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""down.{i}.block""", 'new': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'encoder.mid.block' in key]
__lowerCAmelCase = 2
for i in range(1, num_mid_res_blocks + 1 ):
__lowerCAmelCase = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
__lowerCAmelCase = renew_vae_attention_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
conv_attn_to_linear(lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = num_up_blocks - 1 - i
__lowerCAmelCase = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
__lowerCAmelCase = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
__lowerCAmelCase = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""up.{block_id}.block""", 'new': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'decoder.mid.block' in key]
__lowerCAmelCase = 2
for i in range(1, num_mid_res_blocks + 1 ):
__lowerCAmelCase = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
__lowerCAmelCase = renew_vae_attention_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
conv_attn_to_linear(lowerCAmelCase_ )
return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path , output_path , ):
# Only support V1
    __lowerCAmelCase = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
__lowerCAmelCase = io.BytesIO(r.content )
__lowerCAmelCase = OmegaConf.load(lowerCAmelCase_ )
__lowerCAmelCase = 512
__lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
__lowerCAmelCase = {}
with safe_open(lowerCAmelCase_, framework='pt', device='cpu' ) as f:
for key in f.keys():
__lowerCAmelCase = f.get_tensor(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location=lowerCAmelCase_ )['state_dict']
# Convert the VAE model.
__lowerCAmelCase = create_vae_diffusers_config(lowerCAmelCase_, image_size=lowerCAmelCase_ )
__lowerCAmelCase = custom_convert_ldm_vae_checkpoint(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = AutoencoderKL(**lowerCAmelCase_ )
vae.load_state_dict(lowerCAmelCase_ )
vae.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
_snake_case : Union[str, Any] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
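    # --- Hedged sketch (illustrative only, not the script's actual code path) --------
    # The conversion above boils down to renaming LDM-style VAE keys to diffusers-style
    # keys, e.g. "down.{i}.block" -> "down_blocks.{i}.resnets". The toy dict and the
    # regex below are a simplified stand-in for renew_vae_resnet_paths/assign_to_checkpoint
    # and ignore details such as the reversed up-block ordering.
    import re as _re

    def _demo_rename(state_dict):
        out = {}
        for key, value in state_dict.items():
            key = _re.sub(r'^down\.(\d+)\.block', r'down_blocks.\1.resnets', key)
            key = _re.sub(r'^up\.(\d+)\.block', r'up_blocks.\1.resnets', key)
            out[key] = value
        return out

    assert _demo_rename({'down.0.block.0.conv1.weight': 0}) == {'down_blocks.0.resnets.0.conv1.weight': 0}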
| 53 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3_6 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : List[str]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[str]=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Any ) -> Union[str, Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = 3_0_0
return config
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , ) -> Tuple:
__lowerCAmelCase = True
__lowerCAmelCase = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> str:
__lowerCAmelCase = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Tuple ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = MraModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Optional[int] ) -> Optional[int]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowercase ( self : Optional[int] ) -> Tuple:
return
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : Any ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
__lowerCAmelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
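# --- Hedged sketch of the tolerance-check pattern used in the slow tests above ----
# Compare an output slice against a hand-recorded reference with an absolute
# tolerance; the tensors below are illustrative stand-ins, not real model outputs.
if is_torch_available():
    _expected = torch.tensor([[0.0140, 0.0830, -0.0381]] )
    _actual = _expected + 1e-5  # pretend model output, within atol
    assert torch.allclose(_actual , _expected , atol=1e-4 )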
| 53 | 1 |
import re
def indian_phone_validator( phone : str ) -> bool:
    pat = re.compile(R'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
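    # Illustrative checks implied by the regex above (optional +91/0/91 prefix,
    # then ten digits whose first digit is 7, 8 or 9):
    assert indian_phone_validator('+918827897895')
    assert indian_phone_validator('9876543210')
    assert not indian_phone_validator('1234567890')  # first digit must be 7, 8 or 9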
| 53 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_snake_case : Union[str, Any] = 2
class Dictionary :
"""simple docstring"""
    def __init__( self , *, # begin keyword-only arguments
    bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        return self.indices == other.indices
    def __getitem__( self , idx ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ):
        return len(self.symbols )
    def __contains__( self , sym ):
        return sym in self.indices
@classmethod
    def load( cls , f ):
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        return 0
    def add_from_file( self , f ) -> None:
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(' ' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys( d ):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(R'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(R'$', '</w>', k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
# prep
if not os.path.exists(lowerCAmelCase_ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowerCAmelCase_, exist_ok=lowerCAmelCase_ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'checkpoint.pt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location='cpu' )
__lowerCAmelCase = chkpt['cfg']['model']
# dicts
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'dict.txt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
__lowerCAmelCase = Dictionary.load(lowerCAmelCase_ )
__lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# merges_file (bpecodes)
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'bpecodes' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowerCAmelCase_, lowerCAmelCase_ )
# model config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'config.json' )
__lowerCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# tokenizer config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# model
__lowerCAmelCase = chkpt['model']
# remove unneeded keys
__lowerCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
else:
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
__lowerCAmelCase = BioGptConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = BioGptForCausalLM(lowerCAmelCase_ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase_ )
# save
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase_, lowerCAmelCase_ )
print('Conversion is done!' )
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : int = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
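    # --- Hedged toy illustration of the BPE-vocab rewrite performed above ------------
    # Trailing '@@' continuation markers are stripped and word-final tokens gain '</w>',
    # mirroring the comment in rewrite_dict_keys; the vocab below is made up.
    _toy = {'le@@': 5, 'tt@@': 6, 'er': 7}
    _rewritten = {
        (re.sub(R'@@$', '', k ) if k.endswith('@@' ) else k + '</w>'): v for k, v in _toy.items()
    }
    assert _rewritten == {'le': 5, 'tt': 6, 'er</w>': 7}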
| 53 | 1 |
def multiplication_table( number : int , number_of_terms : int ) -> str:
    return "\n".join(
        F"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
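    # Expected output of the call above, for reference:
    # 5 * 1 = 5
    # 5 * 2 = 10
    # ...
    # 5 * 10 = 50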
| 53 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowerCAmelCase = getattr(lowerCAmelCase_ , 'use_pretrained_backbone' , lowerCAmelCase_ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(lowerCAmelCase_ , 'out_indices' , lowerCAmelCase_ ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
__lowerCAmelCase = {layer['module']: str(lowerCAmelCase_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop('use_timm_backbone' , lowerCAmelCase_ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
__lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
pass
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Dict ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(lowerCAmelCase_ )
__lowerCAmelCase = tuple(lowerCAmelCase_ ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , attentions=lowerCAmelCase_ )
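# --- Hedged usage sketch (requires the optional timm dependency) ------------------
# The class above ultimately wraps a timm features_only model; the model name below
# is an illustrative assumption, not a project default. Guarded so that importing
# this module stays side-effect free.
if __name__ == "__main__" and is_timm_available():
    _demo = timm.create_model('resnet18' , pretrained=False , features_only=True , out_indices=(-1,) )
    print([info['module'] for info in _demo.feature_info.info] )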
| 53 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 53 |
from __future__ import annotations
def a_ ( nums : list[float] ):
    if len(nums ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(i <= 0 for i in nums ):
        raise ValueError('All values must be greater than 0' )
    copy_nums = nums.copy()
    copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
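    # Illustrative calls (keeping the snippet's obfuscated function name a_):
    assert a_([3, 4, 5] )  # longest side 5 < 3 + 4, so the polygon can close
    assert not a_([3, 4, 9] )  # 9 >= 3 + 4, so the sides cannot close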
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case : Optional[Any] = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = ['MaskFormerFeatureExtractor']
_snake_case : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
_snake_case : Optional[int] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 53 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Union[str, Any]=1_0 , lowerCAmelCase_ : List[str]=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple="relu" , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Optional[int]=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def lowercase ( self : Tuple ) -> List[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> str:
__lowerCAmelCase = FlaxRegNetModel(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> Tuple:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
a_ = False
a_ = False
a_ = False
def lowercase ( self : Dict ) -> None:
__lowerCAmelCase = FlaxRegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase ( self : int ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : str ) -> Union[str, Any]:
return
def lowercase ( self : Dict ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowercase ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowercase ( self : Tuple ) -> Tuple:
pass
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : str ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img( ):
__lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='np' )
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
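# --- Hedged sketch of the JIT-enabled vs JIT-disabled pattern exercised above -----
# A toy function stands in for the model call; shapes are illustrative.
if is_flax_available():
    @jax.jit
    def _demo_fn(x ):
        return jnp.tanh(x ) * 2.0
    _x = jnp.ones((2, 3) )
    _jitted = _demo_fn(_x )
    with jax.disable_jit():
        _eager = _demo_fn(_x )
    assert _jitted.shape == _eager.shape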
| 53 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Dict = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """vivit"""
def __init__( self : Union[str, Any] , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Dict=[2, 1_6, 1_6] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Union[str, Any]=7_6_8 , lowerCAmelCase_ : str=1_2 , lowerCAmelCase_ : Dict=1_2 , lowerCAmelCase_ : int=3_0_7_2 , lowerCAmelCase_ : Dict="gelu_fast" , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : int=1e-06 , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : Any , ) -> str:
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = image_size
__lowerCAmelCase = num_frames
__lowerCAmelCase = tubelet_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = qkv_bias
super().__init__(**lowerCAmelCase_ )
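# --- Hedged arithmetic note (illustrative, not part of the config class) ----------
# With the defaults above (num_frames=32, image_size=224, tubelet_size=[2, 16, 16]),
# a video is cut into (32/2) * (224/16) * (224/16) tubelet tokens:
assert (32 // 2) * (224 // 16) * (224 // 16) == 16 * 14 * 14 == 3136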
| 53 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case : Optional[int] = logging.getLogger(__name__)
_snake_case : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def lowercase ( self : List[Any] ) -> List[Any]:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a_ = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def lowercase ( self : int ) -> int:
if self.train_file is not None:
__lowerCAmelCase = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowerCAmelCase = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset , ref_file ):
    with open(ref_file, 'r', encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
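# --- Hedged illustration (not from the original script) ---------------------------
# The ref file consumed above holds one JSON list per line, aligned 1:1 with the
# dataset rows; the sample lines below are made up for demonstration.
_demo_ref_lines = ['[1, 4, 5]', '[2]']
_demo_refs = [json.loads(line ) for line in _demo_ref_lines if line.strip()]
assert _demo_refs == [[1, 4, 5], [2]]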
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s', lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowerCAmelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""train[:{data_args.validation_split_percentage}%]""", )
__lowerCAmelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""train[{data_args.validation_split_percentage}%:]""", )
else:
__lowerCAmelCase = {}
if data_args.train_file is not None:
__lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
__lowerCAmelCase = data_args.validation_file
__lowerCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
__lowerCAmelCase = 'text'
__lowerCAmelCase = load_dataset(lowerCAmelCase_, data_files=lowerCAmelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name, **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path, **lowerCAmelCase_ )
else:
__lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
__lowerCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **lowerCAmelCase_ )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
__lowerCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=lowerCAmelCase_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
__lowerCAmelCase = AutoModelForMaskedLM.from_config(lowerCAmelCase_ )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowerCAmelCase = datasets['train'].column_names
else:
__lowerCAmelCase = datasets['validation'].column_names
__lowerCAmelCase = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines so blank rows are not tokenized as training samples
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__lowerCAmelCase = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowerCAmelCase = add_chinese_references(
tokenized_datasets['validation'], data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__lowerCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowerCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowerCAmelCase = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowerCAmelCase_, args=lowerCAmelCase_, train_dataset=tokenized_datasets['train'] if training_args.do_train else None, eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None, tokenizer=lowerCAmelCase_, data_collator=lowerCAmelCase_, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowerCAmelCase = model_args.model_name_or_path
else:
__lowerCAmelCase = None
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCAmelCase = os.path.join(training_args.output_dir, 'train_results.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_, 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json' ) )
# Evaluation
__lowerCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = math.exp(eval_output['eval_loss'] )
__lowerCAmelCase = perplexity
__lowerCAmelCase = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
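# ---------------------------------------------------------------------------
# Editor's sketch (new code, not part of the original script): a minimal,
# self-contained look at the whole-word-masking collator used above. The
# 'bert-base-uncased' checkpoint and mlm_probability=0.15 are illustrative
# assumptions, not values taken from this script's arguments.
def _demo_whole_word_mask_collator():
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask
    tok = AutoTokenizer.from_pretrained('bert-base-uncased' )
    collator = DataCollatorForWholeWordMask(tokenizer=tok, mlm_probability=0.15 )
    # The collator masks all sub-word pieces of a chosen word together, so
    # 'labels' carries the original ids at every masked position.
    batch = collator([tok('whole word masking groups subword pieces' )] )
    return batch['input_ids'], batch['labels']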
| 53 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig( PretrainedConfig ):
"""simple docstring"""
a_ = """git_vision_model"""
def __init__( self : Dict , lowerCAmelCase_ : int=7_6_8 , lowerCAmelCase_ : int=3_0_7_2 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : int=1_2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : int=1_6 , lowerCAmelCase_ : Tuple="quick_gelu" , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Optional[int]=0.02 , **lowerCAmelCase_ : List[str] , ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = hidden_act
@classmethod
def lowercase ( cls : Any , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
__lowerCAmelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class GitConfig( PretrainedConfig ):
"""simple docstring"""
a_ = """git"""
def __init__( self : Optional[int] , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]=3_0_5_2_2 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : str=6 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : str=3_0_7_2 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[Any]=1_0_2_4 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : List[str]=1e-12 , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : Any="absolute" , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=1_0_1 , lowerCAmelCase_ : Union[str, Any]=1_0_2 , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Dict , ) -> List[Any]:
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
if vision_config is None:
__lowerCAmelCase = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
__lowerCAmelCase = GitVisionConfig(**lowerCAmelCase_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = tie_word_embeddings
__lowerCAmelCase = num_image_with_embedding
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
def lowercase ( self : List[str] ) -> List[Any]:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
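# Editor's sketch (new code): how the two config classes above compose; the
# override values are illustrative, not checkpoint defaults.
def _demo_git_config():
    vision = GitVisionConfig(image_size=2_2_4 , patch_size=1_6 )
    config = GitConfig(vision_config=vision.to_dict() , num_hidden_layers=6 )
    # GitConfig rebuilds a GitVisionConfig from the nested dict (see __init__ above).
    return config.vision_config.image_size , config.num_hidden_layers  # -> (224, 6)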
| 53 |
def solution(n: int = 200_0000 ) -> int:
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
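# Editor's sketch (new code): the idea behind _LazyModule in miniature --
# attribute access triggers the real import, keeping `import` of the package
# cheap. The class below is illustrative, not the transformers implementation.
def _demo_lazy_module_pattern():
    import importlib
    class _Lazy:
        def __init__(self, name_to_module: dict ) -> None:
            self._map = name_to_module
        def __getattr__(self, name: str ):
            module = importlib.import_module(self._map[name] )
            return getattr(module, name )
    return _Lazy({'PegasusXConfig': 'transformers'} )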
| 53 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
    def _create_dummy_data( self , data_dir ) -> None:
        os.makedirs(data_dir , exist_ok=True )
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 1_2, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , 'w' ) as f:
                    f.write(content )
    def _run_finetune( self , gpus : int , distributed_retriever : str = "pytorch" ) -> dict:
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , 'output' )
        data_dir = os.path.join(tmp_dir , 'data' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , 'metrics.json' )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
@require_torch_gpu
    def test_finetune_gpu( self ) -> None:
        result = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
    def test_finetune_multigpu( self ) -> None:
        result = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
    def test_finetune_ray_retrieve_gpu( self ) -> None:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_ray_retrieve_multigpu( self ) -> None:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
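# Editor's sketch (new code): a standalone check of the dummy-data layout that
# _create_dummy_data above produces; the helper name is new, for illustration.
def _check_dummy_data_layout(data_dir: str ) -> bool:
    expected = {'train': 1_2, 'val': 2, 'test': 2}
    return all(
        sum(1 for _ in open(os.path.join(data_dir, f"""{split}.{field}""" ) ) ) == n_lines
        for split, n_lines in expected.items()
        for field in ('source', 'target')
    )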
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
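# Editor's note (sketch, new code): once the lazy module resolves, the exported
# names are reached through the top-level package; the override below is only
# an illustrative value.
def _demo_git_exports():
    import transformers
    # Attribute access on the package triggers the lazy-import machinery above.
    return transformers.GitConfig(num_hidden_layers=6 ).num_hidden_layers  # -> 6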
| 53 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]="resnet50" , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[Any]=True , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = out_indices if out_indices is not None else [4]
__lowerCAmelCase = stage_names
__lowerCAmelCase = out_features
__lowerCAmelCase = backbone
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_pretrained_backbone
__lowerCAmelCase = is_training
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def lowercase ( self : List[Any] ) -> Union[str, Any]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ) -> int:
__lowerCAmelCase = TimmBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def lowercase ( self : List[str] ) -> str:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TimmBackbone,) if is_torch_available() else ()
a_ = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : Tuple ) -> int:
__lowerCAmelCase = TimmBackboneModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase ( self : Dict ) -> List[str]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase = 'resnet18'
__lowerCAmelCase = 'microsoft/resnet-18'
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ )
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ , out_indices=[1, 2, 3] )
__lowerCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def lowercase ( self : List[str] ) -> Tuple:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def lowercase ( self : str ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Any ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def lowercase ( self : Dict ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Any ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Tuple ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def lowercase ( self : int ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def lowercase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def lowercase ( self : Dict ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : List[str] ) -> Optional[Any]:
pass
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase = self.all_model_classes[0]
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase = copy.deepcopy(lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase = copy.deepcopy(lowerCAmelCase_ )
__lowerCAmelCase = False
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(**lowerCAmelCase_ )
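# Editor's sketch (new code) mirroring the timm/transformers equivalence checks
# above; 'resnet18' is the same illustrative checkpoint the test uses.
def _demo_timm_backbone_channels():
    backbone = AutoBackbone.from_pretrained('resnet18' , use_timm_backbone=True , out_indices=[1, 2, 3] )
    feats = backbone(torch.randn(1, 3, 2_2_4, 2_2_4 ) ).feature_maps
    # Each returned feature map's channel dim should line up with .channels.
    return [f.shape[1] for f in feats] == list(backbone.channels )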
| 53 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int ) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn the image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow('Original', gray)
    imshow('Gabor filter with 11x11 mask and 6 directions', out)
waitKey(0)
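# Editor's note (sketch, new code): the kernel above evaluates
#   g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2*pi*x'/lambd + psi)
# with (x', y') the pixel offset rotated by theta. Quick shape sanity check:
def _demo_gabor_shape():
    k = gabor_filter_kernel(10, 8, 0, 10, 0, 0 )
    return k.shape  # -> (11, 11): an even ksize is bumped to the next odd size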
| 53 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('env' )
    else:
        parser = argparse.ArgumentParser('Accelerate env command' )
    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
        'PyTorch XPU available': str(pt_xpu_available ),
        'PyTorch NPU available': str(pt_npu_available ),
        'System RAM': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n' )
    print('\n'.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
    accelerate_config_str = (
        '\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config, dict )
        else F"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info['`Accelerate` configs'] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
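# Editor's sketch (new code): the same environment report reduced to a dict for
# programmatic reuse; this helper is illustrative and not part of accelerate.
def _demo_env_snapshot() -> dict:
    return {
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F"""{torch.__version__} ({torch.cuda.is_available()})""",
        'System RAM': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }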
| 53 | 1 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase ( self : Optional[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
def lowercase ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]="binary" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str="warn" , ) -> int:
__lowerCAmelCase = recall_score(
lowerCAmelCase_ , lowerCAmelCase_ , labels=lowerCAmelCase_ , pos_label=lowerCAmelCase_ , average=lowerCAmelCase_ , sample_weight=lowerCAmelCase_ , zero_division=lowerCAmelCase_ , )
return {"recall": float(lowerCAmelCase_ ) if score.size == 1 else score}
| 53 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so each spawned process sees the script's own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
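# Editor's note (sketch, new code): the essence of the launcher is the argv
# rewrite above -- each spawned TPU process sees the training script's own
# argument list with --tpu_num_cores appended.
def _demo_patched_argv(script: str, script_args: list, num_cores: int ) -> list:
    return [script] + script_args + ['--tpu_num_cores', str(num_cores )]
# _demo_patched_argv('train.py', ['--lr', '3e-4'], 8)
# -> ['train.py', '--lr', '3e-4', '--tpu_num_cores', '8']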
| 53 | 1 |
from __future__ import annotations
def print_distance(distance: list[float], src: int ):
    print(F"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(F"""{i}\t\t{d}""" )
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int ):
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int ) -> list[float]:
    distance = [float('inf' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count )
    if negative_cycle_exists:
        raise Exception('Negative cycle found' )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src , dest , weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
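# Editor's sketch (new code): a fixed three-vertex graph exercising the
# functions above without interactive input.
def _demo_bellman_ford() -> list:
    demo_graph = [
        {'src': 0, 'dst': 1, 'weight': 4},
        {'src': 0, 'dst': 2, 'weight': 5},
        {'src': 1, 'dst': 2, 'weight': -2},
    ]
    return bellman_ford(demo_graph, 3, 3, 0 )  # -> [0.0, 4.0, 2.0]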
| 53 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict=1_3 , lowerCAmelCase_ : str=3_2 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : str=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Tuple=[2, 2, 3, 2] , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]=3_7 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : List[Any]=1_0 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Dict=["stage2", "stage3", "stage4"] , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_stages
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = out_features
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = num_stages
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : List[str] ) -> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowercase ( self : Dict ) -> List[str]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCAmelCase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCAmelCase_ , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ) -> Optional[Any]:
__lowerCAmelCase = UperNetForSemanticSegmentation(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
a_ = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = UperNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Tuple ) -> Union[str, Any]:
return
def lowercase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def lowercase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def lowercase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowercase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowercase ( self : str ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : Tuple ) -> List[Any]:
pass
def lowercase ( self : Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Any ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = _config_zero_init(lowerCAmelCase_ )
__lowerCAmelCase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def lowercase ( self : Any ) -> int:
pass
@slow
def lowercase ( self : Optional[int] ) -> Optional[int]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def a_ ( ):
__lowerCAmelCase = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg' )
__lowerCAmelCase = Image.open(lowerCAmelCase_ ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
__lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(lowerCAmelCase_ )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
def lowercase ( self : List[Any] ) -> List[str]:
__lowerCAmelCase = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
__lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(lowerCAmelCase_ )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
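# Editor's sketch (new code): the single-image inference pattern the slow tests
# above exercise, condensed; the checkpoint matches the ConvNext test.
def _demo_upernet_predict(image ):
    processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
    model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' )
    with torch.no_grad():
        logits = model(**processor(images=image , return_tensors='pt' ) ).logits
    return logits.argmax(dim=1 )  # (batch, height, width) map of class indices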
| 53 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""
    def __init__( self , list_of_points : list[tuple[float, float]] ) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t : float ) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t : float ) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size : float = 0.01 ) -> None:
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x : list[float] = []  # x coordinates of points to plot
        to_plot_y : list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
        plt.scatter(x_points , y_points , color='red' , label='Control Points' )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
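# Editor's note (sketch, new code): basis_function above is the Bernstein
# polynomial b_{i,n}(t) = comb(n, i) * (1 - t)**(n - i) * t**i, and a valid
# basis sums to 1 for every t -- exactly what its assert checks.
def _demo_bernstein_partition_of_unity(t: float = 0.3 ) -> float:
    curve = BezierCurve([(0, 0), (5, 5), (5, 0)] )  # degree 2
    return sum(curve.basis_function(t ) )  # -> 1.0 up to float rounding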
| 53 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features ):
    assert isinstance(dataset, Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : int ):
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read()
_check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict, lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read()
_check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Any ):
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
__lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_, split=lowerCAmelCase_ ).read()
_check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict ):
if issubclass(lowerCAmelCase_, lowerCAmelCase_ ):
__lowerCAmelCase = text_path
elif issubclass(lowerCAmelCase_, lowerCAmelCase_ ):
__lowerCAmelCase = [text_path]
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
__lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read()
_check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int, lowerCAmelCase_ : Tuple=("train",) ):
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Dict ):
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = TextDatasetReader({'train': text_path}, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read()
_check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = tmp_path / 'cache'
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
__lowerCAmelCase = {'text': 'string'}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = TextDatasetReader({'train': text_path}, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read()
_check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ )
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
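# A minimal usage sketch of the reader exercised above (the file path is
# hypothetical; this assumes a plain-text file with one example per line):
#
#   from datasets.io.text import TextDatasetReader
#   dataset = TextDatasetReader("my_corpus.txt", cache_dir="cache").read()
#   print(dataset.num_rows, dataset.column_names)  # e.g. 4 ['text']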
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files in `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
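# Illustrative call (the directory layout is hypothetical): collect selected
# warning categories from artifacts previously downloaded into ./artifacts.
#
#   warnings_found = extract_warnings("./artifacts", ["DeprecationWarning", "FutureWarning"])
#   print(sorted(warnings_found))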
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
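# Self-contained illustration of the q/k/v split performed above, with a toy
# hidden size (values are arbitrary and not tied to any checkpoint): the fused
# (3 * hidden, hidden) qkv matrix is sliced into query, key and value blocks.
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
#   assert torch.equal(torch.cat([q, k, v]), qkv)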
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
__lowerCAmelCase = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
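# The rule these tests encode, roughly: for every non-safetensors weight file
# in the pipeline, a matching `.safetensors` file (or its variant) must also be
# present. A minimal re-implementation sketch for illustration only — this is
# an assumption, not the actual diffusers logic:
#
#   def sketch_is_safetensors_compatible(filenames):
#       weights = [f for f in filenames if f.endswith(".bin")]
#       return all(
#           f.replace("pytorch_model.bin", "model.safetensors")
#            .replace("diffusion_pytorch_model.bin", "diffusion_pytorch_model.safetensors")
#           in filenames
#           for f in weights
#       )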
import os
import time

import numpy as np
import onnxruntime as ort

# TensorRT execution-provider settings: enable INT8, skip the native
# calibration table, and cache the built engine.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
import math
def jump_search(arr: list, x: int) -> int:
    """Return the index of x in the sorted list arr, or -1 if x is absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead in blocks of size sqrt(n) until the block containing x is found.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
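# A quick self-check on an illustrative sorted list (jump search requires
# sorted input); this runs alongside the interactive prompt below.
if __name__ == "__main__":
    example = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    assert jump_search(example, 55) == 10
    assert jump_search(example, 6) == -1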
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
class MaxFenwickTree:
    """
    Fenwick-tree-like structure supporting point updates and range-maximum
    queries over the half-open interval [left, right).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # running range maxima

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute the maximum over the range this node is responsible for.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
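    # Minimal usage sketch: point updates followed by a range-maximum query
    # over the half-open interval [0, 5).
    tree = MaxFenwickTree(5)
    tree.update(0, 10)
    tree.update(2, 7)
    tree.update(4, 20)
    assert tree.query(0, 5) == 20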
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generate a random poker hand, an opponent hand, and the expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for the given year using Gauss' algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
a_ = field(
default=0.1 , metadata={
"""help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , )
a_ = field(
default=0.05 , metadata={
"""help""": (
"""Propability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
a_ = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
a_ = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = None
a_ = None
def __call__( self : int , lowerCAmelCase_ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
__lowerCAmelCase = [{'input_values': feature['input_values']} for feature in features]
__lowerCAmelCase = [{'input_ids': feature['labels']} for feature in features]
__lowerCAmelCase = self.processor.pad(
lowerCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__lowerCAmelCase = self.processor.pad(
labels=lowerCAmelCase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
__lowerCAmelCase = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
__lowerCAmelCase = labels
return batch
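# Illustrative use of the collator above (a sketch, not part of the original
# script; `processor` is the WavaVecaProcessor built later in main(), and the
# toy features below are hypothetical):
#
# collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
# batch = collator([
#     {"input_values": [0.1, 0.2, 0.3], "labels": [5, 2]},
#     {"input_values": [0.4, 0.5], "labels": [7]},
# ])
# # padded label positions are -100, so the CTC loss ignores them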
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Tuple , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
__lowerCAmelCase = self._prepare_inputs(lowerCAmelCase_ )
if self.use_amp:
with autocast():
__lowerCAmelCase = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
else:
__lowerCAmelCase = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCAmelCase = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__lowerCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase_ )
else:
loss.backward()
return loss.detach()
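# Note on the reductions handled in training_step above (informal): "mean"
# averages the per-GPU CTC losses, while "sum" renormalizes the summed loss by
# the number of non-padded label tokens (labels >= 0), keeping the gradient
# scale comparable across batches with different amounts of padding.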
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s', lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__lowerCAmelCase = datasets.load_dataset(
'common_voice', data_args.dataset_config_name, split=data_args.train_split_name )
__lowerCAmelCase = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test' )
# Create and save tokenizer
__lowerCAmelCase = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCAmelCase_ : Any ):
__lowerCAmelCase = re.sub(lowerCAmelCase_, '', batch['sentence'] ).lower() + ' '
return batch
__lowerCAmelCase = train_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
__lowerCAmelCase = eval_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
def extract_all_chars(lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = ' '.join(batch['text'] )
__lowerCAmelCase = list(set(lowerCAmelCase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, batched=lowerCAmelCase_, batch_size=-1, keep_in_memory=lowerCAmelCase_, remove_columns=train_dataset.column_names, )
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, batched=lowerCAmelCase_, batch_size=-1, keep_in_memory=lowerCAmelCase_, remove_columns=eval_dataset.column_names, )
__lowerCAmelCase = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__lowerCAmelCase = {v: k for k, v in enumerate(lowerCAmelCase_ )}
__lowerCAmelCase = vocab_dict[' ']
del vocab_dict[" "]
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = len(lowerCAmelCase_ )
with open('vocab.json', 'w' ) as vocab_file:
json.dump(lowerCAmelCase_, lowerCAmelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = WavaVecaCTCTokenizer(
'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
__lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6000, padding_value=0.0, do_normalize=lowerCAmelCase_, return_attention_mask=lowerCAmelCase_ )
__lowerCAmelCase = WavaVecaProcessor(feature_extractor=lowerCAmelCase_, tokenizer=lowerCAmelCase_ )
__lowerCAmelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
if data_args.max_train_samples is not None:
__lowerCAmelCase = min(len(lowerCAmelCase_ ), data_args.max_train_samples )
__lowerCAmelCase = train_dataset.select(range(lowerCAmelCase_ ) )
if data_args.max_val_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__lowerCAmelCase = torchaudio.transforms.Resample(4_8000, 1_6000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCAmelCase_ : int ):
__lowerCAmelCase , __lowerCAmelCase = torchaudio.load(batch['path'] )
__lowerCAmelCase = resampler(lowerCAmelCase_ ).squeeze().numpy()
__lowerCAmelCase = 1_6000
__lowerCAmelCase = batch['text']
return batch
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(lowerCAmelCase_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__lowerCAmelCase = processor(
audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0] )
batch.update(lowerCAmelCase_ )
return batch
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, )
# Metric
__lowerCAmelCase = datasets.load_metric('wer' )
def compute_metrics(lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = pred.predictions
__lowerCAmelCase = np.argmax(lowerCAmelCase_, axis=-1 )
__lowerCAmelCase = processor.tokenizer.pad_token_id
__lowerCAmelCase = processor.batch_decode(lowerCAmelCase_ )
# we do not want to group tokens when computing the metrics
__lowerCAmelCase = processor.batch_decode(pred.label_ids, group_tokens=lowerCAmelCase_ )
__lowerCAmelCase = wer_metric.compute(predictions=lowerCAmelCase_, references=lowerCAmelCase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__lowerCAmelCase = DataCollatorCTCWithPadding(processor=lowerCAmelCase_, padding=lowerCAmelCase_ )
# Initialize our Trainer
__lowerCAmelCase = CTCTrainer(
model=lowerCAmelCase_, data_collator=lowerCAmelCase_, args=lowerCAmelCase_, compute_metrics=lowerCAmelCase_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__lowerCAmelCase = model_args.model_name_or_path
else:
__lowerCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
__lowerCAmelCase = min(lowerCAmelCase_, len(lowerCAmelCase_ ) )
trainer.log_metrics('train', lowerCAmelCase_ )
trainer.save_metrics('train', lowerCAmelCase_ )
trainer.save_state()
# Evaluation
__lowerCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase_ )
__lowerCAmelCase = min(lowerCAmelCase_, len(lowerCAmelCase_ ) )
trainer.log_metrics('eval', lowerCAmelCase_ )
trainer.save_metrics('eval', lowerCAmelCase_ )
return results
if __name__ == "__main__":
main()
| 53 | 1 |
import math
def jump_search(arr: list, x: int) -> int:
    """Find the index of ``x`` in sorted ``arr`` (or -1) by jumping in sqrt(n)-sized blocks."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
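# Illustrative trace (not in the original file): for arr = [0, 1, 2, 3, 4, 10, 17]
# and x = 10, the block size is int(sqrt(7)) == 2; the jump phase stops with
# prev == 4, the linear phase advances to index 5 where arr[5] == 10, so
# jump_search(arr, x) returns 5.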
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
| 53 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = RetriBertTokenizer
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : str="[UNK]" , lowerCAmelCase_ : Optional[Any]="[SEP]" , lowerCAmelCase_ : List[str]="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : List[Any]="[MASK]" , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : List[Any] , ) -> Dict:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(lowerCAmelCase_ , normalizer_state.pop('type' ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**lowerCAmelCase_ )
__lowerCAmelCase = do_lower_case
def lowercase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
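# Usage sketch (not part of the original module; assumes network access to the
# Hugging Face Hub — upstream this class is published as `RetriBertTokenizerFast`):
#
# tok = RetriBertTokenizerFast.from_pretrained('yjernite/retribert-base-uncased')
# enc = tok('a query', 'a candidate passage')  # pair encoding with [CLS]/[SEP]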
| 53 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , lowerCAmelCase_ : str=1_4_0_8 , lowerCAmelCase_ : List[str]=6_1_4_4 , lowerCAmelCase_ : Any=3_9 , lowerCAmelCase_ : int=1_6 , lowerCAmelCase_ : Optional[int]=2_2_4 , lowerCAmelCase_ : Union[str, Any]=1_4 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Tuple=1e-6 , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Tuple=1e-10 , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Dict , ) -> str:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = patch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = hidden_act
__lowerCAmelCase = qkv_bias
@classmethod
def lowercase ( cls : List[str] , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : str ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__lowerCAmelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """instructblip_qformer"""
def __init__( self : Tuple , lowerCAmelCase_ : Optional[Any]=3_0_5_2_2 , lowerCAmelCase_ : Any=7_6_8 , lowerCAmelCase_ : Optional[int]=1_2 , lowerCAmelCase_ : Tuple=1_2 , lowerCAmelCase_ : Tuple=3_0_7_2 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=5_1_2 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Optional[Any]=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Any="absolute" , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : List[Any]=1_4_0_8 , **lowerCAmelCase_ : Optional[Any] , ) -> int:
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = cross_attention_frequency
__lowerCAmelCase = encoder_hidden_size
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Tuple ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__lowerCAmelCase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """instructblip"""
a_ = True
def __init__( self : Dict , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Optional[Any]=3_2 , **lowerCAmelCase_ : Tuple ) -> int:
super().__init__(**lowerCAmelCase_ )
if vision_config is None:
__lowerCAmelCase = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__lowerCAmelCase = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__lowerCAmelCase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__lowerCAmelCase = InstructBlipVisionConfig(**lowerCAmelCase_ )
__lowerCAmelCase = InstructBlipQFormerConfig(**lowerCAmelCase_ )
__lowerCAmelCase = text_config['model_type'] if 'model_type' in text_config else 'opt'
__lowerCAmelCase = CONFIG_MAPPING[text_model_type](**lowerCAmelCase_ )
__lowerCAmelCase = self.text_config.tie_word_embeddings
__lowerCAmelCase = self.text_config.is_encoder_decoder
__lowerCAmelCase = num_query_tokens
__lowerCAmelCase = self.vision_config.hidden_size
__lowerCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowerCAmelCase = 1.0
__lowerCAmelCase = 0.02
@classmethod
def lowercase ( cls : Dict , lowerCAmelCase_ : InstructBlipVisionConfig , lowerCAmelCase_ : InstructBlipQFormerConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase_ , )
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.qformer_config.to_dict()
__lowerCAmelCase = self.text_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
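# Composition sketch (defaults only; not part of the original module — upstream
# these classes are named InstructBlipVisionConfig, InstructBlipQFormerConfig and
# InstructBlipConfig):
#
# vision = InstructBlipVisionConfig()
# qformer = InstructBlipQFormerConfig()
# text = CONFIG_MAPPING['opt']()
# config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)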
| 53 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'digital_image_processing/image_data/lena_small.jpg')
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def a_ ( ):
    negative_img = cn.convert_to_negative(img)
# assert negative_img array for at least one True
assert negative_img.any()
def a_ ( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
        assert str(cc.change_contrast(img, 110) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def a_ ( ):
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
# Assert ambiguous array
assert resp.all()
def a_ ( ):
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
# assert ambiguous array for all == True
assert canny_img.all()
    canny_array = canny.canny(canny_img)
# assert canny array for at least one True
assert canny_array.any()
def a_ ( ):
    assert gg.gaussian_filter(gray_img, 5, sigma=0.9).all()
def a_ ( ):
# laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray_img, laplace).astype(uint8)
assert res.any()
def a_ ( ):
    assert med.median_filter(gray_img, 3).any()
def a_ ( ):
    grad, theta = sob.sobel_filter(gray_img)
assert grad.any() and theta.any()
def a_ ( ):
    sepia = sp.make_sepia(img, 20)
assert sepia.all()
def a_ ( lowerCAmelCase_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
    burkes = bs.Burkes(imread(lowerCAmelCase_, 1), 120)
burkes.process()
assert burkes.output_img.any()
def a_ ( lowerCAmelCase_ : str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(lowerCAmelCase_, 1), 400, 200)
nn.process()
assert nn.output.any()
def a_ ( ):
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel. (Informally, LBP(p) = sum((neighbor_k >= p) << k) over the
    # 8 neighbors k = 0..7, giving an 8-bit texture code per pixel.)
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
assert lbp_image.any()
| 53 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'num_attention_heads' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]=1_3 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Optional[int]=6_4_0 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : Optional[int]="silu" , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Any=3_2 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[int]=1_0 , lowerCAmelCase_ : int=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = last_hidden_size
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = conv_kernel_size
__lowerCAmelCase = output_stride
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = classifier_dropout_prob
__lowerCAmelCase = use_labels
__lowerCAmelCase = is_training
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase ( self : Dict ) -> Optional[Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> Any:
__lowerCAmelCase = MobileViTModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MobileViTForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> Dict:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MobileViTForSemanticSegmentation(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowerCAmelCase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase ( self : Dict ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
a_ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = MobileViTModelTester(self )
__lowerCAmelCase = MobileViTConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase ( self : int ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase ( self : List[str] ) -> Dict:
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase ( self : List[str] ) -> Tuple:
pass
def lowercase ( self : str ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : Optional[Any] ) -> int:
pass
def lowercase ( self : Optional[Any] ) -> int:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : str ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = 5
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowerCAmelCase = 2
for i in range(len(lowerCAmelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ )
@slow
def lowercase ( self : int ) -> Optional[Any]:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MobileViTModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def a_ ( ):
__lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : Optional[int] ) -> List[Any]:
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(lowerCAmelCase_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__lowerCAmelCase = model.to(lowerCAmelCase_ )
__lowerCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs.logits
# verify the logits
__lowerCAmelCase = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] , device=lowerCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : List[Any] ) -> List[str]:
__lowerCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__lowerCAmelCase = model.to(lowerCAmelCase_ )
__lowerCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs.logits.detach().cpu()
__lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ , target_sizes=[(5_0, 6_0)] )
__lowerCAmelCase = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
__lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ )
__lowerCAmelCase = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
| 53 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
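# Usage sketch (not part of the original module; the class above appears to
# correspond to the upstream `LevitImageProcessor`). With the defaults, an image's
# shorter edge is resized to int(256 / 224 * 224) = 256, center-cropped to
# 224x224, rescaled by 1/255, and normalized with the ImageNet mean/std:
#
# from PIL import Image
# processor = LevitImageProcessor()
# batch = processor(images=Image.open('example.jpg'), return_tensors='np')
# batch['pixel_values'].shape  # -> (1, 3, 224, 224)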
| 53 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = XLMProphetNetTokenizer
a_ = False
a_ = True
def lowercase ( self : List[str] ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = '[PAD]'
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(lowerCAmelCase_ ) , 1_0_1_2 )
def lowercase ( self : Any ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = XLMProphetNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = 'Hello World!'
__lowerCAmelCase = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
# fmt: off
__lowerCAmelCase = {'input_ids': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 53 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3_6 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : List[str]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[str]=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 3_0_0
return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=3_7 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='MRA does not output attentions' )
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 2_5_6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 2_5_6, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
        input_ids = torch.arange(4_0_9_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 4_0_9_6, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 53 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        embedder_hidden_size = 3_2
        embedder_projection_dim = 3_2

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ) )

        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )

        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )

        torch.manual_seed(0 )
        # NOTE: upcast_attention/use_linear_projection below are reconstructed assumptions; the
        # original obfuscated source did not preserve these boolean values.
        unet = UNet2DConditionModel(
            sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )

        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )

        torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )

        input_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        inputs.update({'image_embeds': None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe(input_image , 'anime turtle' , generator=generator , output_type='np' )
        image = output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe(input_image , 'anime turtle' , generator=generator , output_type='np' )
        image = output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image , 'anime turtle' , num_inference_steps=2 , output_type='np' , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 1_0**9
| 53 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary :
    """A mapping from symbols to consecutive integers"""

    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )

    def __eq__( self , other ):
        return self.indices == other.indices

    def __getitem__( self , idx ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self ):
        return len(self.symbols )

    def __contains__( self , sym ):
        return sym in self.indices

    @classmethod
    def load( cls , f ):
        d = cls()
        d.add_from_file(f )
        return d

    def add_symbol( self , word , n=1 , overwrite=False ):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx

    def _load_meta( self , lines ):
        return 0

    def add_from_file( self , f ):
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines )

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys( d ):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(R'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(R'$', '</w>', k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
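
# A quick sanity check of rewrite_dict_keys on a small, hypothetical BPE vocab (the entries
# below are illustrative, not taken from a real BioGPT checkpoint):
#   rewrite_dict_keys({'le@@': 5, 'er': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3})
#   => {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}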
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path, pytorch_dump_folder_path ):
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path, exist_ok=True )
    print(F"""Writing results to {pytorch_dump_folder_path}""" )

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file, map_location='cpu' )

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt' )
    if not os.path.isfile(dict_file ):
        raise ValueError(F"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'] )
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent ) )

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'] )
    shutil.copyfile(bpecodes_file, merges_file )

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json' )
    model_conf = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent ) )
# tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent ) )
# model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None )

    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight' ):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name )
        else:
            model_state_dict['biogpt.' + layer_name] = model_state_dict.pop(layer_name )

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )

    # check that it loads ok
    model_new.load_state_dict(model_state_dict )

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
    print(F"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict, pytorch_weights_dump_path )
print('Conversion is done!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
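    # Example invocation with hypothetical paths (the script filename is an assumption):
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
    #       --pytorch_dump_folder_path /path/to/output_dir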
| 53 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path, rembert_config_file, pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('Building PyTorch model from configuration: {}'.format(str(config ) ) )
    model = RemBertModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path )

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path ) )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
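    # Example invocation with hypothetical paths (the script filename is an assumption):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/rembert/model.ckpt \
    #       --rembert_config_file /path/to/rembert/config.json \
    #       --pytorch_dump_path /path/to/output/pytorch_model.bin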
| 53 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone( PreTrainedModel , BackboneMixin ):
    """simple docstring"""

    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__( self , config , **kwargs ):
        requires_backends(self , 'timm' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )

        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )

        if hasattr(config , 'out_features' ) and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )

        pretrained = getattr(config , 'use_pretrained_backbone' , None )
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , 'out_indices' , None ) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ['vision', 'timm'] )
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('config' , TimmBackboneConfig() )
        use_timm = kwargs.pop('use_timm_backbone' , True )
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones' )

        num_channels = kwargs.pop('num_channels' , config.num_channels )
        features_only = kwargs.pop('features_only' , config.features_only )
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('out_indices' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )

    def _init_weights( self , module ):
        pass

    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment' )

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None

        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
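
# A minimal usage sketch (assumes the `timm` package is installed and that "resnet18" is an
# available timm architecture; the config values below are illustrative, not from this file):
#
#   import torch
#   from transformers import TimmBackboneConfig
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps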
| 53 | 1 |
def decimal_isolate( number : float, digit_amount : int ):
    if digit_amount > 0:
        return round(number - int(number ), digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 53 |
from __future__ import annotations
def check_polygon( nums : list[float] ):
if len(lowerCAmelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
    copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
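
# For example, [6, 10, 5] sorts to [5, 6, 10] and 10 < 5 + 6, so the sides can close into a
# polygon (True), while [2, 3, 6] gives 6 >= 2 + 3, so they cannot (False).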
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
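# For instance, assuming this script is saved as cross_validation.py (the filename is an
# assumption), a 5-fold run with mixed precision could be launched with:
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16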
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs ),
            'validation': dataset['train'].select(valid_idxs ),
            'test': dataset['validation'],
        } )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config, args ):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )

    metric = evaluate.load('glue', 'mrpc' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows ), datasets['train']['label'] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps, )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""", eval_metric )

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0 )
    preds = torch.stack(test_predictions, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds, references=test_references )
    accelerator.print('Average test metrics from all folds:', test_metric )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset' )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
| 53 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    """simple docstring"""

    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
return config, pixel_values
    def get_config( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ) -> None:
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class FlaxRegNetModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )

        outputs = model(**inputs )

        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 53 | 1 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
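
# --- usage sketch (added; not part of the original module) -------------------
# Assumption: the IIRFilter class imported above exposes a per-sample
# `process(sample)` method, as it does elsewhere in this audio_filters package.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    print([round(lowpass.process(sample), 4) for sample in (0.0, 1.0, 0.5, 0.0)])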
| 53 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
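
# Example invocation (a sketch only; the flags mirror the dataclass fields
# above, and the file names are illustrative):
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.json \
#       --do_train --do_eval \
#       --output_dir ./output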
| 53 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files', [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
], )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info', [
DatasetInfo(),
DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, ),
], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict', [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
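
# Round-trip sketch outside of pytest (added for illustration; the directory
# path is hypothetical):
#   info = DatasetInfo(description="demo", dataset_size=42)
#   info.write_to_directory("/tmp/demo_info")
#   assert DatasetInfo.from_directory("/tmp/demo_info") == info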
| 53 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    # 0 marks a number as (potentially) prime, 1 marks it as composite.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # Mark every multiple of the prime i as composite, stepping by i.
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
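    # Sanity check (added sketch): the primes below 10 are 2, 3, 5 and 7,
    # which sum to 17. The full run for n = 2_000_000 yields the well-known
    # Project Euler 10 answer, 142913828922.
    assert solution(10) == 17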
| 53 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 53 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 53 | 1 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # The vector with the smaller squared Euclidean distance wins.
        return 1 if d0 > d1 else 0

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
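
# Worked update-rule arithmetic (added for clarity): with alpha = 0.5, a weight
# component w = 0.2 and a sample component x = 1, the rule
# w + alpha * (x - w) gives 0.2 + 0.5 * (1 - 0.2) = 0.6, i.e. the winning
# vector moves halfway towards the sample along every axis.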
| 53 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
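
# Usage sketch (added; AutoBackbone is the public entry point exercised above,
# and the checkpoint name is the one used in the equivalence test):
#   backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
#   outputs = backbone(pixel_values)
#   feature_maps = outputs.feature_maps  # one tensor per requested stage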
| 53 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the issuer prefix of the credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether the given credit card number is valid and return the verdict."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
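
# Worked Luhn arithmetic (added for clarity): for "4111111111111111", doubling
# every second digit from the right turns seven of the 1s into 2s and the
# leading 4 into 8, so the checksum is 7 * 2 + 8 * 1 + 8 = 30, and
# 30 % 10 == 0 marks the number as valid.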
| 53 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
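
# CLI sketch: once accelerate is installed, this module backs the
# `accelerate env` subcommand, e.g.:
#   accelerate env --config_file path/to/config.yaml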
| 53 | 1 |
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
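
# Rendering sketch (standard manim CLI; the file name is illustrative):
#   manim -pql stage_1.py Stage1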
| 53 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the launcher."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
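
# Typical invocation (a sketch mirroring how the transformers examples document
# this launcher; the script path is illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train ...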
| 53 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
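
# Usage sketch (added; assumes a `processor` built as in `main` below, mirroring
# how the original research script wires the collator into the trainer):
#   data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#   trainer = CTCTrainer(model=model, data_collator=data_collator, args=training_args, ...)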
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
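# Example invocation (illustrative only -- the script name, checkpoint and dataset
# config below are assumptions; adjust them to your setup):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish \
#       --do_train --do_eval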
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
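# Minimal inference sketch (illustrative; `image` stands for any PIL image, and the
# checkpoint name mirrors the integration tests above):
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits      # (batch, num_labels, height, width)
#   seg_map = logits.argmax(dim=1)       # per-pixel class ids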
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
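# Minimal post-processing sketch (illustrative; `image` stands for any PIL image,
# and `post_process_semantic_segmentation` is the image processor's public helper):
#
#   outputs = model(**image_processor(image, return_tensors="pt"))
#   seg = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]  # (height, width) tensor of per-pixel class ids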
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
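# For context (illustrative, not part of the test suite): TextDatasetReader backs
# the public `load_dataset("text", ...)` entry point, so the behavior exercised
# above can be reproduced directly. The file name below is a made-up placeholder.
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("text", data_files={"train": "my_file.txt"})
#   # one "text" column, one row per line of the input file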
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
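# Worked example (illustrative): with num_warmup_steps=100 and num_training_steps=1000,
# the multiplier returned by the linear lr_lambda above is
#   step 50   -> 50 / 100            = 0.5   (linear warmup)
#   step 100  -> (1000 - 100) / 900  = 1.0   (warmup finished)
#   step 550  -> (1000 - 550) / 900  = 0.5   (linear decay)
#   step 1000 -> 0.0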
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
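# Minimal usage sketch (illustrative, not part of this module): wiring get_scheduler
# to an optimizer. The model and step counts below are made-up placeholders.
#
#   import torch
#
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for step in range(10_000):
#       ...  # forward/backward + optimizer.step()
#       lr_scheduler.step()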
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
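# Background note (added for clarity, not in the original script): timm/DINO store
# the attention projection as one fused qkv matrix of shape (3 * hidden_size,
# hidden_size). Its three hidden_size-row blocks are, in order, the query, key and
# value weights, which is why read_in_q_k_v slices [:h, :], [h:2h, :] and [-h:, :].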
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
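# Example invocation (illustrative only; the script file name is an assumption):
#
#   python convert_dino_to_pytorch.py \
#       --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16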
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def lowercase ( self : Any ) -> Dict:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
__lowerCAmelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
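        # Illustrative arithmetic (not part of the original test): with the default
        # 16x16 patches and 3 channels this evaluates to 16 * 16 * 3 + 2 = 770 features
        # per flattened patch. The "+ 2" presumably holds the row and column index
        # that Pix2Struct prepends to every patch.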
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCAmelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCAmelCase = image_processor(
lowerCAmelCase_ , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase ( self : List[str] ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
__lowerCAmelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
__lowerCAmelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
            with self.assertRaises(ValueError ):
__lowerCAmelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
__lowerCAmelCase = 'Hello'
__lowerCAmelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowerCAmelCase_ , header_text=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCAmelCase = image_processor(
lowerCAmelCase_ , return_tensors='pt' , max_patches=lowerCAmelCase_ , header_text=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase ( self : Any ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
__lowerCAmelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCAmelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCAmelCase = image_processor(
lowerCAmelCase_ , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase ( self : Optional[Any] ) -> List[Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCAmelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCAmelCase = image_processor(
lowerCAmelCase_ , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = PixaStructImageProcessor if is_vision_available() else None
def lowercase ( self : Any ) -> int:
__lowerCAmelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCAmelCase = 3
@property
def lowercase ( self : Any ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_convert_rgb' ) )
def lowercase ( self : List[Any] ) -> Any:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
__lowerCAmelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCAmelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCAmelCase = image_processor(
lowerCAmelCase_ , return_tensors='pt' , max_patches=lowerCAmelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 53 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) )
def lowercase ( self : str ) -> Any:
__lowerCAmelCase = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) )
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) )
def lowercase ( self : List[Any] ) -> List[str]:
__lowerCAmelCase = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) )
def lowercase ( self : List[Any] ) -> int:
__lowerCAmelCase = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) )
def lowercase ( self : str ) -> str:
__lowerCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[Any]:
__lowerCAmelCase = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[str]:
# pass variant but use the non-variant filenames
__lowerCAmelCase = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[Any]:
__lowerCAmelCase = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : List[str] ) -> List[Any]:
# pass variant but use the non-variant filenames
__lowerCAmelCase = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
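# Illustrative summary (an inference from the tests above, not an authoritative
# description of the helper): is_safetensors_compatible appears to return True
# only when every PyTorch ".bin" component file has a matching ".safetensors"
# counterpart (optionally carrying a ".<variant>." infix such as ".fp16."), and
# False as soon as any component lacks one.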
| 53 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str ):
__lowerCAmelCase = RobertaPreLayerNormConfig.from_pretrained(
lowerCAmelCase_, architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__lowerCAmelCase = torch.load(hf_hub_download(repo_id=lowerCAmelCase_, filename='pytorch_model.bin' ) )
__lowerCAmelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__lowerCAmelCase = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__lowerCAmelCase = tensor_value
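    # Illustrative example (not part of the original script): under the renaming
    # above, "roberta.encoder.layer.0.output.dense.weight" becomes
    # "roberta_prelayernorm.encoder.layer.0.output.dense.weight", while unused
    # ".self.LayerNorm.{weight,bias}" entries are dropped entirely.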
__lowerCAmelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCAmelCase_, config=lowerCAmelCase_, state_dict=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
# convert tokenizer
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
        help='Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : Union[str, Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53 |
import math
def a_ ( lowerCAmelCase_ : list, lowerCAmelCase_ : int ):
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
__lowerCAmelCase = 0
while arr[min(lowerCAmelCase_, lowerCAmelCase_ ) - 1] < x:
__lowerCAmelCase = step
step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__lowerCAmelCase = prev + 1
if prev == min(lowerCAmelCase_, lowerCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
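# Worked example (illustrative, not part of the original script): for the sorted
# array arr = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55] (n = 10, block size
# floor(sqrt(10)) = 3) and x = 21, the block probes visit arr[2] = 2 and
# arr[5] = 8, overshoot at arr[8] = 34, and the linear scan from index 6 then
# finds 21 at index 7 -- so jump_search(arr, 21) == 7, in O(sqrt(n)) probes.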
if __name__ == "__main__":
_snake_case : List[str] = input('Enter numbers separated by a comma:\n').strip()
_snake_case : Optional[Any] = [int(item) for item in user_input.split(',')]
_snake_case : List[str] = int(input('Enter the number to be searched:\n'))
_snake_case : Optional[int] = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
| 53 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_snake_case : str = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = R'\w+[.]\d+'
__lowerCAmelCase = re.findall(lowerCAmelCase_, lowerCAmelCase_ )
for pat in pats:
__lowerCAmelCase = key.replace(lowerCAmelCase_, '_'.join(pat.split('.' ) ) )
return key
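# Illustrative example (not part of the original script): the regex matches every
# "<word>.<digits>" pair, so rename_key("down_blocks.0.resnets.1.conv1.weight")
# rewrites it to "down_blocks_0.resnets_1.conv1.weight", matching Flax's
# underscore-separated module naming.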
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : str, lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__lowerCAmelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__lowerCAmelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__lowerCAmelCase = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__lowerCAmelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__lowerCAmelCase = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__lowerCAmelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
__lowerCAmelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__lowerCAmelCase = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__lowerCAmelCase = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
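# Illustrative note (not part of the original module): PyTorch stores conv
# kernels as (out_channels, in_channels, kh, kw) while Flax expects
# (kh, kw, in_channels, out_channels), which is what transpose(2, 3, 1, 0)
# produces above; linear weights are likewise transposed from (out, in) to
# (in, out) via pt_tensor.T.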
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
__lowerCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__lowerCAmelCase = flax_model.init_weights(PRNGKey(lowerCAmelCase_ ) )
__lowerCAmelCase = flatten_dict(lowerCAmelCase_ )
__lowerCAmelCase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCAmelCase = rename_key(lowerCAmelCase_ )
__lowerCAmelCase = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
__lowerCAmelCase , __lowerCAmelCase = rename_key_and_reshape_tensor(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
__lowerCAmelCase = jnp.asarray(lowerCAmelCase_ )
return unflatten_dict(lowerCAmelCase_ )
| 53 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : List[Any], lowerCAmelCase_ : str ):
# Initialise PyTorch model
__lowerCAmelCase = RemBertConfig.from_json_file(lowerCAmelCase_ )
print('Building PyTorch model from configuration: {}'.format(str(lowerCAmelCase_ ) ) )
__lowerCAmelCase = RemBertModel(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowerCAmelCase_ ) )
torch.save(model.state_dict(), lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : int = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 53 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Tuple = TypeVar('DatasetType', Dataset, IterableDataset)
def a_ ( lowerCAmelCase_ : List[DatasetType], lowerCAmelCase_ : Optional[List[float]] = None, lowerCAmelCase_ : Optional[int] = None, lowerCAmelCase_ : Optional[DatasetInfo] = None, lowerCAmelCase_ : Optional[NamedSplit] = None, lowerCAmelCase_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(lowerCAmelCase_ ):
if not isinstance(lowerCAmelCase_, (Dataset, IterableDataset) ):
if isinstance(lowerCAmelCase_, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(lowerCAmelCase_ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowerCAmelCase_ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCAmelCase_ ).__name__}.""" )
if i == 0:
__lowerCAmelCase , __lowerCAmelCase = (
(Dataset, IterableDataset) if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, stopping_strategy=lowerCAmelCase_ )
else:
return _interleave_iterable_datasets(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, stopping_strategy=lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : List[DatasetType], lowerCAmelCase_ : Optional[DatasetInfo] = None, lowerCAmelCase_ : Optional[NamedSplit] = None, lowerCAmelCase_ : int = 0, ):
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(lowerCAmelCase_ ):
if not isinstance(lowerCAmelCase_, (Dataset, IterableDataset) ):
if isinstance(lowerCAmelCase_, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(lowerCAmelCase_ )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowerCAmelCase_ ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCAmelCase_ ).__name__}.""" )
if i == 0:
__lowerCAmelCase , __lowerCAmelCase = (
(Dataset, IterableDataset) if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, axis=lowerCAmelCase_ )
else:
return _concatenate_iterable_datasets(lowerCAmelCase_, info=lowerCAmelCase_, split=lowerCAmelCase_, axis=lowerCAmelCase_ )
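# Illustrative usage sketch (hypothetical values; the public helpers are
# re-exported by the datasets package):
#
#   from datasets import Dataset, interleave_datasets
#   ds1 = Dataset.from_dict({"x": [0, 1, 2]})
#   ds2 = Dataset.from_dict({"x": [10, 11, 12]})
#   mixed = interleave_datasets(
#       [ds1, ds2], probabilities=[0.5, 0.5], seed=42,
#       stopping_strategy="first_exhausted",
#   )
#
# With "first_exhausted" sampling stops once either source runs out of examples;
# "all_exhausted" keeps oversampling until every source has been fully seen.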
| 53 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
__lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCAmelCase_ )
__lowerCAmelCase = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
__lowerCAmelCase = 847
__lowerCAmelCase = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
__lowerCAmelCase = 150
__lowerCAmelCase = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
__lowerCAmelCase = 171
__lowerCAmelCase = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
__lowerCAmelCase = 133
__lowerCAmelCase = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
__lowerCAmelCase = 19
__lowerCAmelCase = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
__lowerCAmelCase = 65
__lowerCAmelCase = 'mapillary-vistas-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
    __lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
return config
def a_ ( lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : int ):
__lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:dim, :]
__lowerCAmelCase = in_proj_bias[: dim]
__lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
__lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
__lowerCAmelCase = in_proj_weight[
-dim :, :
]
__lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
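# Illustrative sketch (not part of the original script): the original checkpoint
# fuses q, k and v into one projection of shape (3 * dim, dim); the slicing above
# splits it into three equal row blocks, e.g. for dim = 96:
#   q = in_proj_weight[:96, :]    k = in_proj_weight[96:192, :]    v = in_proj_weight[-96:, :]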
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : Dict ):
# fmt: off
__lowerCAmelCase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: hidden_size, :]
        __lowerCAmelCase = in_proj_bias[: hidden_size]
__lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: hidden_size, :]
        __lowerCAmelCase = in_proj_bias[: hidden_size]
__lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase = in_proj_bias[-hidden_size :]
# fmt: on
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : bool = False ):
__lowerCAmelCase = get_maskformer_config(lowerCAmelCase_ )
# load original state_dict
with open(lowerCAmelCase_, 'rb' ) as f:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_swin_q_k_v(lowerCAmelCase_, config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase_, lowerCAmelCase_ )
# update to torch tensors
for key, value in state_dict.items():
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ )
# load 🤗 model
__lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCAmelCase_ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase_, param.shape )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowerCAmelCase_, strict=lowerCAmelCase_ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase_ ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
__lowerCAmelCase = prepare_img()
if "vistas" in model_name:
__lowerCAmelCase = 65
elif "cityscapes" in model_name:
__lowerCAmelCase = 6_5535
else:
__lowerCAmelCase = 255
__lowerCAmelCase = True if 'ade' in model_name else False
__lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCAmelCase_, reduce_labels=lowerCAmelCase_ )
__lowerCAmelCase = image_processor(lowerCAmelCase_, return_tensors='pt' )
__lowerCAmelCase = model(**lowerCAmelCase_ )
print('Logits:', outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowerCAmelCase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCAmelCase_, atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case : Union[str, Any] = 16
_snake_case : Optional[Any] = 32
def a_ ( lowerCAmelCase_ : Accelerator, lowerCAmelCase_ : int = 16 ):
__lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCAmelCase = load_dataset('glue', 'mrpc' )
def tokenize_function(lowerCAmelCase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase_, max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCAmelCase = datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowerCAmelCase_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase = 8
else:
__lowerCAmelCase = None
return tokenizer.pad(
lowerCAmelCase_, padding='longest', max_length=lowerCAmelCase_, pad_to_multiple_of=lowerCAmelCase_, return_tensors='pt', )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(
tokenized_datasets['train'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
__lowerCAmelCase = DataLoader(
tokenized_datasets['validation'], shuffle=lowerCAmelCase_, collate_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case : Union[str, Any] = mocked_dataloaders # noqa: F811
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Dict ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCAmelCase_ ) == "1":
__lowerCAmelCase = 2
# Initialize accelerator
__lowerCAmelCase = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase = config['lr']
__lowerCAmelCase = int(config['num_epochs'] )
__lowerCAmelCase = int(config['seed'] )
__lowerCAmelCase = int(config['batch_size'] )
__lowerCAmelCase = evaluate.load('glue', 'mrpc' )
# If the batch size is too big we use gradient accumulation
__lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
__lowerCAmelCase = MAX_GPU_BATCH_SIZE
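    # Illustrative arithmetic (hypothetical values): with batch_size = 64 and
    # MAX_GPU_BATCH_SIZE = 16 on a non-TPU setup, gradient_accumulation_steps
    # becomes 64 // 16 = 4 and the per-step batch size drops to 16, so the
    # effective batch size is still 4 * 16 = 64.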
set_seed(lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = get_dataloaders(lowerCAmelCase_, lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase = AdamW(params=model.parameters(), lr=lowerCAmelCase_ )
# Instantiate scheduler
__lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs.loss
__lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__lowerCAmelCase = 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowerCAmelCase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowerCAmelCase_, references=lowerCAmelCase_, )
__lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""", lowerCAmelCase_ )
def a_ ( ):
__lowerCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision', type=lowerCAmelCase_, default=lowerCAmelCase_, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase_, lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 53 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
_snake_case : List[Any] = True
from torch.cuda.amp import autocast
_snake_case : Dict = logging.getLogger(__name__)
def a_ ( lowerCAmelCase_ : str=None, lowerCAmelCase_ : str=None ):
return field(default_factory=lambda: default, metadata=lowerCAmelCase_ )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
a_ = field(
default=0.1 , metadata={
"""help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
a_ = field(
        default=0.1 , metadata={"""help""": """The dropout probability for all 1D convolutional layers in the feature extractor."""} , )
a_ = field(
default=0.05 , metadata={
"""help""": (
"""Propability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
a_ = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
a_ = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = None
a_ = None
def __call__( self : int , lowerCAmelCase_ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
__lowerCAmelCase = [{'input_values': feature['input_values']} for feature in features]
__lowerCAmelCase = [{'input_ids': feature['labels']} for feature in features]
__lowerCAmelCase = self.processor.pad(
lowerCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__lowerCAmelCase = self.processor.pad(
labels=lowerCAmelCase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
__lowerCAmelCase = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
__lowerCAmelCase = labels
return batch
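# Illustrative example (hypothetical values): for a padded label row [5, 9, 0, 0]
# with attention_mask [1, 1, 0, 0], the masked_fill above yields
# [5, 9, -100, -100]; label positions set to -100 are ignored when the CTC loss
# is computed, so padding never contributes to the gradient.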
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Tuple , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
__lowerCAmelCase = self._prepare_inputs(lowerCAmelCase_ )
if self.use_amp:
with autocast():
__lowerCAmelCase = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
else:
__lowerCAmelCase = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCAmelCase = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__lowerCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase_ )
else:
loss.backward()
return loss.detach()
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s', lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__lowerCAmelCase = datasets.load_dataset(
'common_voice', data_args.dataset_config_name, split=data_args.train_split_name )
__lowerCAmelCase = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test' )
# Create and save tokenizer
__lowerCAmelCase = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCAmelCase_ : Any ):
__lowerCAmelCase = re.sub(lowerCAmelCase_, '', batch['sentence'] ).lower() + ' '
return batch
__lowerCAmelCase = train_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
__lowerCAmelCase = eval_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
def extract_all_chars(lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = ' '.join(batch['text'] )
__lowerCAmelCase = list(set(lowerCAmelCase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, batched=lowerCAmelCase_, batch_size=-1, keep_in_memory=lowerCAmelCase_, remove_columns=train_dataset.column_names, )
    __lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, batched=lowerCAmelCase_, batch_size=-1, keep_in_memory=lowerCAmelCase_, remove_columns=eval_dataset.column_names, )
__lowerCAmelCase = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__lowerCAmelCase = {v: k for k, v in enumerate(lowerCAmelCase_ )}
__lowerCAmelCase = vocab_dict[' ']
del vocab_dict[" "]
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = len(lowerCAmelCase_ )
with open('vocab.json', 'w' ) as vocab_file:
json.dump(lowerCAmelCase_, lowerCAmelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = WavaVecaCTCTokenizer(
'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
__lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6000, padding_value=0.0, do_normalize=lowerCAmelCase_, return_attention_mask=lowerCAmelCase_ )
__lowerCAmelCase = WavaVecaProcessor(feature_extractor=lowerCAmelCase_, tokenizer=lowerCAmelCase_ )
__lowerCAmelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
if data_args.max_train_samples is not None:
__lowerCAmelCase = min(len(lowerCAmelCase_ ), data_args.max_train_samples )
__lowerCAmelCase = train_dataset.select(range(lowerCAmelCase_ ) )
if data_args.max_val_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__lowerCAmelCase = torchaudio.transforms.Resample(4_8000, 1_6000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCAmelCase_ : int ):
__lowerCAmelCase , __lowerCAmelCase = torchaudio.load(batch['path'] )
__lowerCAmelCase = resampler(lowerCAmelCase_ ).squeeze().numpy()
__lowerCAmelCase = 1_6000
__lowerCAmelCase = batch['text']
return batch
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(lowerCAmelCase_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__lowerCAmelCase = processor(
audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0] )
batch.update(lowerCAmelCase_ )
return batch
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, )
# Metric
__lowerCAmelCase = datasets.load_metric('wer' )
def compute_metrics(lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = pred.predictions
__lowerCAmelCase = np.argmax(lowerCAmelCase_, axis=-1 )
__lowerCAmelCase = processor.tokenizer.pad_token_id
__lowerCAmelCase = processor.batch_decode(lowerCAmelCase_ )
# we do not want to group tokens when computing the metrics
__lowerCAmelCase = processor.batch_decode(pred.label_ids, group_tokens=lowerCAmelCase_ )
__lowerCAmelCase = wer_metric.compute(predictions=lowerCAmelCase_, references=lowerCAmelCase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__lowerCAmelCase = DataCollatorCTCWithPadding(processor=lowerCAmelCase_, padding=lowerCAmelCase_ )
# Initialize our Trainer
__lowerCAmelCase = CTCTrainer(
model=lowerCAmelCase_, data_collator=lowerCAmelCase_, args=lowerCAmelCase_, compute_metrics=lowerCAmelCase_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__lowerCAmelCase = model_args.model_name_or_path
else:
__lowerCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
__lowerCAmelCase = min(lowerCAmelCase_, len(lowerCAmelCase_ ) )
trainer.log_metrics('train', lowerCAmelCase_ )
trainer.save_metrics('train', lowerCAmelCase_ )
trainer.save_state()
# Evaluation
__lowerCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase_ )
__lowerCAmelCase = min(lowerCAmelCase_, len(lowerCAmelCase_ ) )
trainer.log_metrics('eval', lowerCAmelCase_ )
trainer.save_metrics('eval', lowerCAmelCase_ )
return results
if __name__ == "__main__":
main()
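# For reference, a small self-contained sketch of the word error rate that the
# 'wer' metric above reports: word-level edit distance divided by the number of
# reference words (standard definition, not the datasets implementation).
def _sketch_wer(prediction: str, reference: str) -> float:
    hyp, ref = prediction.split(), reference.split()
    # classic dynamic-programming edit distance over words
    dist = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dist[i][0] = i
    for j in range(len(hyp) + 1):
        dist[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dist[i][j] = min(dist[i - 1][j] + 1, dist[i][j - 1] + 1, dist[i - 1][j - 1] + cost)
    return dist[len(ref)][len(hyp)] / len(ref)  # e.g. _sketch_wer('a b d', 'a b c') == 1/3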
| 53 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case : Dict = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
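# A rough standalone sketch (not the actual transformers _LazyModule) of the
# pattern used above: attribute access triggers the submodule import, so
# importing the package itself stays cheap.
import importlib
class _LazyModuleSketch:
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        # map every exported name to the submodule that defines it
        self._name_to_module = {name: module for module, names in import_structure.items() for name in names}
    def __getattr__(self, name):
        # only runs when normal attribute lookup fails, i.e. on first access
        module = importlib.import_module('.' + self._name_to_module[name], self._package_name)
        return getattr(module, name)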
| 53 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_snake_case : Any = logging.get_logger(__name__)
_snake_case : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : Optional[Any] = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_snake_case : str = {
'yjernite/retribert-base-uncased': 512,
}
_snake_case : Optional[int] = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = RetriBertTokenizer
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : str="[UNK]" , lowerCAmelCase_ : Optional[Any]="[SEP]" , lowerCAmelCase_ : List[str]="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : List[Any]="[MASK]" , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : List[Any] , ) -> Dict:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(lowerCAmelCase_ , normalizer_state.pop('type' ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**lowerCAmelCase_ )
__lowerCAmelCase = do_lower_case
def lowercase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
return output
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def lowercase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
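# Illustrative sketch (hypothetical token ids) of what the two methods above
# produce for a BERT-style sequence pair: [CLS] A [SEP] B [SEP], with segment
# id 0 covering the first sequence and 1 covering the second.
def _sketch_pair_inputs(ids_a, ids_b, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids  # the two lists always have equal length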
| 53 | 1 |
import math
import qiskit
def a_ ( lowerCAmelCase_ : int = 1, lowerCAmelCase_ : int = 1, lowerCAmelCase_ : int = 1 ):
if (
isinstance(lowerCAmelCase_, lowerCAmelCase_ )
or isinstance(lowerCAmelCase_, lowerCAmelCase_ )
or isinstance(lowerCAmelCase_, lowerCAmelCase_ )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
__lowerCAmelCase = qiskit.QuantumRegister(4, 'qr' )
__lowerCAmelCase = qiskit.ClassicalRegister(2, 'cr' )
# list the entries
__lowerCAmelCase = [input_a, input_a, carry_in]
__lowerCAmelCase = qiskit.QuantumCircuit(lowerCAmelCase_, lowerCAmelCase_ )
for i in range(0, 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0, 1, 3 ) # ccx = toffoli gate
quantum_circuit.cx(0, 1 )
quantum_circuit.ccx(1, 2, 3 )
quantum_circuit.cx(1, 2 )
quantum_circuit.cx(0, 1 )
quantum_circuit.measure([2, 3], lowerCAmelCase_ ) # measure the last two qbits
__lowerCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
__lowerCAmelCase = qiskit.execute(lowerCAmelCase_, lowerCAmelCase_, shots=1000 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
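# For comparison (standard definition), the classical full adder the circuit
# above emulates; with basis-state inputs (0 or 1, no Hadamard entries), the
# measured counts should concentrate entirely on this (carry_out, sum) pair.
def _classical_full_adder(a: int, b: int, carry_in: int):
    carry_out, sum_bit = divmod(a + b + carry_in, 2)
    return carry_out, sum_bit  # e.g. (1, 1) for inputs 1, 1, 1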
| 53 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_snake_case : Union[str, Any] = imread(R'digital_image_processing/image_data/lena_small.jpg')
_snake_case : Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a_ ( ):
__lowerCAmelCase = cn.convert_to_negative(lowerCAmelCase_ )
# assert negative_img array for at least one True
assert negative_img.any()
def a_ ( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCAmelCase_, 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def a_ ( ):
__lowerCAmelCase = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a_ ( ):
__lowerCAmelCase = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__lowerCAmelCase = canny.canny(lowerCAmelCase_ )
# assert canny array for at least one True
assert canny_array.any()
def a_ ( ):
assert gg.gaussian_filter(lowerCAmelCase_, 5, sigma=0.9 ).all()
def a_ ( ):
# laplace diagonals
__lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__lowerCAmelCase = conv.img_convolve(lowerCAmelCase_, lowerCAmelCase_ ).astype(lowerCAmelCase_ )
assert res.any()
def a_ ( ):
assert med.median_filter(lowerCAmelCase_, 3 ).any()
def a_ ( ):
__lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(lowerCAmelCase_ )
assert grad.any() and theta.any()
def a_ ( ):
__lowerCAmelCase = sp.make_sepia(lowerCAmelCase_, 20 )
assert sepia.all()
def a_ ( lowerCAmelCase_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
__lowerCAmelCase = bs.Burkes(imread(lowerCAmelCase_, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def a_ ( lowerCAmelCase_ : str = "digital_image_processing/image_data/lena_small.jpg", ):
__lowerCAmelCase = rs.NearestNeighbour(imread(lowerCAmelCase_, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def a_ ( ):
__lowerCAmelCase = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(lowerCAmelCase_, 0 )
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
__lowerCAmelCase = lbp.local_binary_value(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
assert lbp_image.any()
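# A compact sketch (assuming the common 8-neighbour definition) of the local
# binary pattern the test above exercises: each neighbour at least as bright
# as the centre pixel contributes one bit of the pattern value.
def _sketch_lbp_value(window):
    # window is a 3x3 neighbourhood; walk the 8 neighbours clockwise
    center = window[1][1]
    neighbours = [window[0][0], window[0][1], window[0][2], window[1][2],
                  window[2][2], window[2][1], window[2][0], window[1][0]]
    return sum(1 << i for i, pixel in enumerate(neighbours) if pixel >= center)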
| 53 | 1 |
def a_ ( lowerCAmelCase_ : int = 10 ):
if not isinstance(lowerCAmelCase_, lowerCAmelCase_ ) or n < 0:
raise ValueError('Invalid input' )
__lowerCAmelCase = 10**n
__lowerCAmelCase = 2_8433 * (pow(2, 783_0457, lowerCAmelCase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
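# Note: the three-argument pow above performs fast modular exponentiation, so
# 2**7830457 is never materialised. A tiny square-and-multiply sketch of the
# same idea:
def _sketch_pow_mod(base: int, exponent: int, modulus: int) -> int:
    result, base = 1, base % modulus
    while exponent:
        if exponent & 1:
            result = result * base % modulus
        base = base * base % modulus
        exponent >>= 1
    return result  # _sketch_pow_mod(2, 7830457, 10**10) == pow(2, 7830457, 10**10)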
| 53 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
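# Worked sketch (illustrative numbers) of the DeiT-style sizing above: with
# size = {'shortest_edge': 224}, the shorter side is first resized to
# int(256 / 224 * 224) = 256, then a 224x224 centre crop is taken.
def _sketch_shortest_edge_size(height: int, width: int, shortest_edge: int = 224):
    target = int((256 / 224) * shortest_edge)
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)  # e.g. (256, 341) for 480x640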
| 53 | 1 |
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = [0 for i in range(len(lowerCAmelCase_ ) )]
# initialize interval's left pointer and right pointer
__lowerCAmelCase , __lowerCAmelCase = 0, 0
for i in range(1, len(lowerCAmelCase_ ) ):
# case when current index is inside the interval
if i <= right_pointer:
__lowerCAmelCase = min(right_pointer - i + 1, z_result[i - left_pointer] )
__lowerCAmelCase = min_edge
while go_next(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
__lowerCAmelCase , __lowerCAmelCase = i, i + z_result[i] - 1
return z_result
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : list[int], lowerCAmelCase_ : str ):
return i + z_result[i] < len(lowerCAmelCase_ ) and s[z_result[i]] == s[i + z_result[i]]
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str ):
__lowerCAmelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__lowerCAmelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(lowerCAmelCase_ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
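# Cross-check sketch (quadratic, for illustration only): the naive definition
# of the Z-array that the linear-time algorithm above computes.
def _naive_z(s: str) -> list:
    z = [0] * len(s)
    for i in range(1, len(s)):
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
    return z  # _naive_z('abracadabra') == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]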
| 53 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3_6 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : List[str]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[str]=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Any ) -> Union[str, Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = 3_0_0
return config
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , ) -> Tuple:
__lowerCAmelCase = True
__lowerCAmelCase = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> str:
__lowerCAmelCase = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = MraModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Optional[int] ) -> Optional[int]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowercase ( self : Optional[int] ) -> Tuple:
return
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : Any ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
__lowerCAmelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 53 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : str ):
# ===== initialization =====
__lowerCAmelCase = Mock()
__lowerCAmelCase = conn, Mock()
__lowerCAmelCase = iter([1, None] )
__lowerCAmelCase = lambda lowerCAmelCase_ : next(lowerCAmelCase_ )
# ===== invoke =====
send_file(filename='mytext.txt', testing=lowerCAmelCase_ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
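# For context, a rough sketch of the server loop the mocks above exercise.
# This is an assumption about file_transfer.send_file's shape, reconstructed
# from the assertions: bind/listen/accept once, one recv, stream the file in
# chunks until read() returns a falsy value, then close and shut down.
def _sketch_send_file(filename: str = 'mytext.txt'):
    import socket
    sock = socket.socket()
    sock.bind(('localhost', 12312))
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, 'rb') as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()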
| 53 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_snake_case : Union[str, Any] = 2
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , *, # begin keyword-only arguments
lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : Dict="<pad>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = bos, unk, pad, eos
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = {}
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = len(self.symbols )
def __eq__( self : Dict , lowerCAmelCase_ : Dict ) -> str:
return self.indices == other.indices
def __getitem__( self : List[Any] , lowerCAmelCase_ : int ) -> Union[str, Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Tuple ) -> List[Any]:
return len(self.symbols )
def __contains__( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> Optional[int]:
return sym in self.indices
@classmethod
def lowercase ( cls : Dict , lowerCAmelCase_ : str ) -> str:
__lowerCAmelCase = cls()
d.add_from_file(lowerCAmelCase_ )
return d
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Any=False ) -> Optional[Any]:
if word in self.indices and not overwrite:
__lowerCAmelCase = self.indices[word]
__lowerCAmelCase = self.count[idx] + n
return idx
else:
__lowerCAmelCase = len(self.symbols )
__lowerCAmelCase = idx
self.symbols.append(lowerCAmelCase_ )
self.count.append(lowerCAmelCase_ )
return idx
def lowercase ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> Dict:
return 0
def lowercase ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> int:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowerCAmelCase_ ) )
return
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = self._load_meta(lowerCAmelCase_ )
for line in lines[indices_start_line:]:
try:
__lowerCAmelCase , __lowerCAmelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
__lowerCAmelCase = True
__lowerCAmelCase , __lowerCAmelCase = line.rsplit(' ' , 1 )
else:
__lowerCAmelCase = False
__lowerCAmelCase = int(lowerCAmelCase_ )
__lowerCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(lowerCAmelCase_ ) )
self.add_symbol(lowerCAmelCase_ , n=lowerCAmelCase_ , overwrite=lowerCAmelCase_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
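# The fairseq dict.txt format parsed by add_from_file above, shown with
# illustrative entries: one "<token> <count>" pair per line, optionally
# followed by the "#fairseq:overwrite" flag to allow redefining a token:
#     the 1234567
#     of 987654
#     the 42 #fairseq:overwrite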
def a_ ( lowerCAmelCase_ : List[str] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    __lowerCAmelCase = dict((re.sub(R'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(R'$', '</w>', k ), v) for k, v in d.items() )
__lowerCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
__lowerCAmelCase = d[k] # restore
return da
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str] ):
# prep
if not os.path.exists(lowerCAmelCase_ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowerCAmelCase_, exist_ok=lowerCAmelCase_ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'checkpoint.pt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location='cpu' )
__lowerCAmelCase = chkpt['cfg']['model']
# dicts
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'dict.txt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
__lowerCAmelCase = Dictionary.load(lowerCAmelCase_ )
__lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# merges_file (bpecodes)
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'bpecodes' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowerCAmelCase_, lowerCAmelCase_ )
# model config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'config.json' )
__lowerCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# tokenizer config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# model
__lowerCAmelCase = chkpt['model']
# remove unneeded keys
__lowerCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
else:
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
__lowerCAmelCase = BioGptConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = BioGptForCausalLM(lowerCAmelCase_ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase_ )
# save
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase_, lowerCAmelCase_ )
print('Conversion is done!' )
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : int = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
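# Example invocation (script name and paths are placeholders):
#     python convert_biogpt_checkpoint.py \
#         --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
#         --pytorch_dump_folder_path /path/to/output_dir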
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case : List[str] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['YolosFeatureExtractor']
_snake_case : Optional[Any] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowerCAmelCase = getattr(lowerCAmelCase_ , 'use_pretrained_backbone' , lowerCAmelCase_ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(lowerCAmelCase_ , 'out_indices' , lowerCAmelCase_ ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
__lowerCAmelCase = {layer['module']: str(lowerCAmelCase_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop('use_timm_backbone' , lowerCAmelCase_ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
__lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
pass
    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment')

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
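

# A minimal usage sketch (illustrative, not part of the original module). With timm
# installed, the wrapper above behaves like any other transformers backbone;
# `use_pretrained_backbone=False` avoids downloading weights:
#
#   config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False, out_indices=(1, 2, 3))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   print([tuple(fm.shape) for fm in feature_maps])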
| 53 | 1 |
def apply_table(inp, table):
    """Permute the bit-string `inp` according to the 1-indexed positions in `table`."""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circularly left-shift a bit-string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += '0'
        else:
            res += '1'
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: row = outer bits, col = middle bits."""
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (S-DES)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
_snake_case : str = input('Enter 10 bit key: ')
_snake_case : Any = input('Enter 8 bit message: ')
_snake_case : Tuple = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case : List[Any] = [2, 4, 3, 1]
_snake_case : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case : Union[str, Any] = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case : Optional[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case : Optional[Any] = apply_table(key, paa_table)
_snake_case : Any = temp[:5]
_snake_case : Dict = temp[5:]
_snake_case : Dict = left_shift(left)
_snake_case : Any = left_shift(right)
_snake_case : Optional[int] = apply_table(left + right, pa_table)
_snake_case : Optional[Any] = left_shift(left)
_snake_case : Any = left_shift(right)
_snake_case : Tuple = left_shift(left)
_snake_case : List[str] = left_shift(right)
_snake_case : Optional[Any] = apply_table(left + right, pa_table)
# encryption
_snake_case : Any = apply_table(message, IP)
_snake_case : Optional[Any] = function(expansion, sa, sa, keya, temp)
_snake_case : Optional[int] = temp[4:] + temp[:4]
_snake_case : Optional[Any] = function(expansion, sa, sa, keya, temp)
_snake_case : str = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
_snake_case : Tuple = apply_table(CT, IP)
_snake_case : Dict = function(expansion, sa, sa, keya, temp)
_snake_case : Optional[int] = temp[4:] + temp[:4]
_snake_case : Any = function(expansion, sa, sa, keya, temp)
_snake_case : Optional[Any] = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
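

# Round-trip sanity sketch (illustrative, arbitrary test vectors): running the steps
# above with key = '1010000010' and message = '11010111' produces a cipher text CT,
# and feeding CT back through the decryption branch yields PT == message, since
# applying the round keys in reverse order inverts each Feistel round.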
| 53 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Determine whether a two-dimensional polygon can exist with the given side
    lengths: the largest side must be shorter than the sum of the rest.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 53 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_snake_case : str = datasets.logging.get_logger(__name__)
_snake_case : Union[str, Any] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_snake_case : List[str] = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_snake_case : Tuple = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
_snake_case : Union[str, Any] = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = 'bleurt-base-128'
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
return {"scores": scores}
| 53 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", )
            datasets['train'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", )
else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
    tokenizer_kwargs = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]

    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'] if training_args.do_train else None, eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
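

# Example invocation (illustrative; paths and hyper-parameters are placeholders):
#
#   python run_mlm_wwm.py \
#     --model_name_or_path roberta-base \
#     --train_file path/to/train.txt \
#     --validation_file path/to/validation.txt \
#     --do_train --do_eval \
#     --output_dir /tmp/test-mlm-wwm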
| 53 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = 'digital_image_processing/image_data/lena_small.jpg'):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = 'digital_image_processing/image_data/lena_small.jpg'):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 53 |
def solution(n: int = 2000000) -> int:
    """Sum all the primes below `n` using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 | 1 |
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
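
# Optional (illustrative): pass a seeded generator to the pipeline for
# reproducible samples.
#
#   generator = torch.Generator('cuda').manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]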
| 53 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_snake_case : Tuple = logging.getLogger()
_snake_case : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
"""simple docstring"""
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}

        for split in ['train', 'test', 'val']:
            for field in ['source', 'target']:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f'{split}.{field}'), 'w') as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = 'pytorch'):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
return result
@require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def lowercase ( self : List[str] ) -> Tuple:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def lowercase ( self : str ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Any ) -> str:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def lowercase ( self : Dict ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Any ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Dict ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase ( self : Tuple ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def lowercase ( self : int ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def lowercase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def lowercase ( self : Dict ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : List[str] ) -> Optional[Any]:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase ( self : Dict ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 53 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_snake_case : Any = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""simple docstring"""
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'


class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py', FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
        label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowerCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowerCAmelCase = examples[:limit_length]
__lowerCAmelCase = glue_convert_examples_to_features(
lowerCAmelCase_ , lowerCAmelCase_ , max_length=args.max_seq_length , label_list=lowerCAmelCase_ , output_mode=self.output_mode , )
__lowerCAmelCase = time.time()
torch.save(self.features , lowerCAmelCase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Dict ) -> Optional[Any]:
return len(self.features )
    def __getitem__(self, i) -> InputFeatures:
return self.features[i]
    def get_labels(self):
return self.label_list
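

# A minimal usage sketch (illustrative; assumes GLUE-formatted data files exist
# under the given data_dir):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
#   data_args = GlueDataTrainingArguments(task_name='mrpc', data_dir='./glue_data/MRPC', max_seq_length=128)
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset.get_labels())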
| 53 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')

    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.')

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': f'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()

    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([f'- {prop}: {val}' for prop, val in info.items()]))

    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f'\t{accelerate_config}'
    )
    print(accelerate_config_str)

    info['`Accelerate` configs'] = accelerate_config

    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
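# Usage sketch (added; the output values are illustrative, not real):
#
#   $ accelerate env
#   - `Accelerate` version: 0.21.0
#   - Platform: Linux-5.15.0-x86_64-with-glibc2.35
#   - Python version: 3.10.12
#   ...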
| 53 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of ``nums`` with no two adjacent elements.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
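    # Quick demo (an addition, not in the original snippet):
    print(maximum_non_adjacent_sum([5, 1, 1, 5]))  # -> 10 (takes the two 5s)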
| 53 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')

    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
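# Usage sketch (added; the command line is illustrative):
#
#   $ python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launched script must expose a `_mp_fn(index)` entry point, which
# `xmp.spawn` calls once per TPU core.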
| 53 | 1 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
_snake_case : Optional[Any] = 1
while K:
_snake_case : Any = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
_snake_case : Optional[int] = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
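# Example (added for illustration): pretty_print(3) draws a diamond roughly like
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *
#
# (modulo the exact spacing produced by the space/star loops above).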
| 53 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason='UperNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='UperNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason='UperNet does not have tied weights')
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg'
    )
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 53 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
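# Note (added for clarity): with this lazy-import pattern, the tokenizer module
# is only materialized on first attribute access, e.g.
#
#   from transformers import MLukeTokenizer  # resolves through the _LazyModule
#
# assuming sentencepiece is installed; otherwise the symbol is simply absent.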
| 53 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features',
    [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    'features',
    [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 53 | 1 |
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count how many n below `limit` admit exactly ten
    solutions of x**2 - y**2 - z**2 == n with x, y, z a decreasing arithmetic
    progression of positive integers."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 needs first_term > d; n > 0 needs first_term < 4 * d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
| 53 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
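# Illustration (added; not part of the original script): timm stores the
# attention projection as one fused matrix of shape (3 * hidden_size,
# hidden_size); the slices above peel off the query, key and value thirds in
# that order. A tiny self-contained sketch:
def _demo_qkv_split(hidden_size=2):
    # hypothetical helper for illustration only
    fused = torch.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]                   # first third  -> query
    k = fused[hidden_size : hidden_size * 2, :]  # middle third -> key
    v = fused[-hidden_size:, :]                  # last third   -> value
    return q, k, v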
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
_snake_case : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 53 | 1 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
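    # Round-trip demo (added; not from the original snippet):
    print(encode('hello'))                         # AABBBAABAAABABAABABAABBAB
    print(decode('AABBBAABAAABABAABABAABBAB'))     # hello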
| 53 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
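# Note (added for context): roughly speaking, `is_safetensors_compatible`
# returns True when every PyTorch weight file in the list has a `.safetensors`
# counterpart for its component, honoring an optional `variant` such as 'fp16'
# and allowing non-variant fallbacks, as the 'partial' cases above exercise.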
| 53 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, 'realm_block_records')
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
            ], dtype=object, )
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], b'This is the first record')

        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')

        self.assertEqual(retriever.block_records[0], b'This is the first record')
| 53 |
import math
def jump_search(arr: list, x: int) -> int:
    """Return the index of ``x`` in the sorted list ``arr``, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
_snake_case : List[str] = input('Enter numbers separated by a comma:\n').strip()
_snake_case : Optional[Any] = [int(item) for item in user_input.split(',')]
_snake_case : List[str] = int(input('Enter the number to be searched:\n'))
_snake_case : Optional[int] = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
| 53 | 1 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
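    # Quick examples (added; easy to check by hand):
    print(convert_speed(100, 'km/h', 'm/s'))  # 27.778
    print(convert_speed(10, 'm/s', 'km/h'))   # 36.0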
| 53 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : int = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
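# Usage sketch (added; the paths are illustrative, not real files):
#
#   $ python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin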
| 53 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    # NOTE: parameter names below are reconstructed from the attribute
    # assignments; the relative order of `num_channels` and `embed_dim`
    # (both defaulting to 3) is an educated guess.
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        embed_dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEfficientFormerModel,
            'image-classification': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, 'encoder_seq_length'):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, 'chunk_length') and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, 'seq_length', None)
                decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)

        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.return_dict = True
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)

            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config( model_name : str ):
backbone_config = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
config = MaskFormerConfig(backbone_config=backbone_config )
repo_id = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
config.num_labels = 847
filename = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
config.num_labels = 150
filename = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
config.num_labels = 171
filename = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
config.num_labels = 133
filename = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
config.num_labels = 19
filename = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
config.num_labels = 65
filename = 'mapillary-vistas-id2label.json'
idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
return config
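# A quick sanity check of the builder above (the model name is illustrative; it only
# needs to contain one of the substrings matched in the branches):
#   config = get_maskformer_config('maskformer-swin-tiny-ade')
#   assert config.num_labels == 150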
def create_rename_keys( config ):
rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key( dct, old, new ):
val = dct.pop(old )
dct[new] = val
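# e.g. rename_key(state_dict, 'backbone.patch_embed.norm.bias', '...embeddings.norm.bias')
# pops the tensor and re-inserts it under the HF name (keys shortened here for illustration)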
def read_in_swin_q_k_v( state_dict, backbone_config ):
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
# fmt: on
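# A minimal sketch of the split above with illustrative sizes: a fused qkv projection of
# shape (3*dim, dim) is cut into equal thirds, e.g. for dim=96 the query weight is
# in_proj_weight[:96], the key weight in_proj_weight[96:192] and the value weight in_proj_weight[-96:]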
def read_in_decoder_q_k_v( state_dict, config ):
# fmt: off
hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img( ):
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url, stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name : str, checkpoint_path : str, pytorch_dump_folder_path : str, push_to_hub : bool = False ):
config = get_maskformer_config(model_name )
# load original state_dict
with open(checkpoint_path, 'rb' ) as f:
data = pickle.load(f )
state_dict = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict, src, dest )
read_in_swin_q_k_v(state_dict, config.backbone_config )
read_in_decoder_q_k_v(state_dict, config )
# update to torch tensors
for key, value in state_dict.items():
state_dict[key] = torch.from_numpy(value )
# load 🤗 model
model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(name, param.shape )
missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
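# the backbone's final layernorm exists only in the HF Swin implementation, which is
# presumably why those two layernorm keys are the only ones allowed to be missing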
# verify results
image = prepare_img()
if "vistas" in model_name:
ignore_index = 65
elif "cityscapes" in model_name:
ignore_index = 65535
else:
ignore_index = 255
reduce_labels = True if 'ade' in model_name else False
image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
inputs = image_processor(image, return_tensors='pt' )
outputs = model(**inputs )
print('Logits:', outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
expected_logits = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
def check_bouncy( n : int ):
if not isinstance(n, int ):
raise ValueError('check_bouncy() accepts only integer arguments' )
str_n = str(n )
sorted_str_n = ''.join(sorted(str_n ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
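# Illustrative values (not from the original file): 538 is bouncy (5 > 3 but 3 < 8),
# while 1235 is increasing and 66420 is decreasing, so neither of those counts as bouncy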
def solution( percent : float = 99 ):
if not 0 < percent < 100:
raise ValueError('solution() only accepts values from 0 to 100' )
bouncy_num = 0
num = 1
while True:
if check_bouncy(num ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForCTC,
Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
_is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field( default=None, metadata=None ):
return field(default_factory=lambda: default, metadata=metadata )
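# dataclasses forbid mutable defaults, so list-valued fields have to go through a
# default_factory; this helper wraps dataclasses.field accordingly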
@dataclass
class ModelArguments :
"""simple docstring"""
model_name_or_path: str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
cache_dir: Optional[str] = field(
default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
freeze_feature_extractor: Optional[bool] = field(
default=True , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
attention_dropout: Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
activation_dropout: Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
hidden_dropout: Optional[float] = field(
default=0.1 , metadata={
"""help""": """The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
feat_proj_dropout: Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout probability for all 1D convolutional layers in feature extractor."""} , )
mask_time_prob: Optional[float] = field(
default=0.05 , metadata={
"""help""": (
"""Probability of each feature vector along the time axis to be chosen as the start of the vector"""
""" span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
""" vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
layerdrop: Optional[float] = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
dataset_config_name: Optional[str] = field(
default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
train_split_name: Optional[str] = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
overwrite_cache: bool = field(
default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
preprocessing_num_workers: Optional[int] = field(
default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
max_train_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
max_val_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
chars_to_ignore: List[str] = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class DataCollatorCTCWithPadding :
"""simple docstring"""
processor: Wav2Vec2Processor
padding: Union[bool, str] = True
max_length: Optional[int] = None
max_length_labels: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __call__( self : int , features : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [{'input_values': feature['input_values']} for feature in features]
label_features = [{'input_ids': feature['labels']} for feature in features]
batch = self.processor.pad(
input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
labels_batch = self.processor.pad(
labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
batch['labels'] = labels
return batch
class CTCTrainer ( Trainer ):
"""simple docstring"""
def training_step ( self : Tuple , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
inputs = self._prepare_inputs(inputs )
if self.use_amp:
with autocast():
loss = self.compute_loss(model , inputs )
else:
loss = self.compute_loss(model , inputs )
if self.args.n_gpu > 1:
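# under nn.DataParallel each replica returns its own CTC loss, so the gathered loss is a
# vector that has to be reduced to a scalar according to the model's configured reduction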
if model.module.config.ctc_loss_reduction == "mean":
loss = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
loss = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"""{model.module.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss ).backward()
elif self.use_apex:
with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss )
else:
loss.backward()
return loss.detach()
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s', training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
train_dataset = datasets.load_dataset(
'common_voice', data_args.dataset_config_name, split=data_args.train_split_name )
eval_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test' )
# Create and save tokenizer
__lowerCAmelCase = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCAmelCase_ : Any ):
__lowerCAmelCase = re.sub(lowerCAmelCase_, '', batch['sentence'] ).lower() + ' '
return batch
__lowerCAmelCase = train_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
__lowerCAmelCase = eval_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
def extract_all_chars(batch ):
all_text = ' '.join(batch['text'] )
vocab = list(set(all_text ) )
return {"vocab": [vocab], "all_text": [all_text]}
vocab_train = train_dataset.map(
extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
vocab_test = eval_dataset.map(
extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )
vocab_list = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
vocab_dict = {v: k for k, v in enumerate(vocab_list )}
vocab_dict['|'] = vocab_dict[' ']
del vocab_dict[" "]
vocab_dict['[UNK]'] = len(vocab_dict )
vocab_dict['[PAD]'] = len(vocab_dict )
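# at this point vocab_dict maps characters to ids, ending with the word delimiter and the
# special tokens, e.g. (illustrative): {'a': 0, 'b': 1, ..., '|': 37, '[UNK]': 38, '[PAD]': 39}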
with open('vocab.json', 'w' ) as vocab_file:
json.dump(vocab_dict, vocab_file )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
tokenizer = Wav2Vec2CTCTokenizer(
'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True )
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer )
model = Wav2Vec2ForCTC.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ), data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )
resampler = torchaudio.transforms.Resample(48000, 16000 )
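# Common Voice ships 48 kHz audio while Wav2Vec2 was pretrained on 16 kHz speech,
# hence the fixed 48000 -> 16000 resampler above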
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(batch ):
speech_array , sampling_rate = torchaudio.load(batch['path'] )
batch['speech'] = resampler(speech_array ).squeeze().numpy()
batch['sampling_rate'] = 16000
batch['target_text'] = batch['text']
return batch
train_dataset = train_dataset.map(
speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
eval_dataset = eval_dataset.map(
speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(batch ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
processed = processor(
audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0] )
batch.update(processed )
return batch
train_dataset = train_dataset.map(
prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
eval_dataset = eval_dataset.map(
prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
# Metric
wer_metric = datasets.load_metric('wer' )
def compute_metrics(pred ):
pred_logits = pred.predictions
pred_ids = np.argmax(pred_logits, axis=-1 )
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
pred_str = processor.batch_decode(pred_ids )
# we do not want to group tokens when computing the metrics
label_str = processor.batch_decode(pred.label_ids, group_tokens=False )
wer = wer_metric.compute(predictions=pred_str, references=label_str )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True )
# Initialize our Trainer
trainer = CTCTrainer(
model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics['train_samples'] = min(max_train_samples, len(train_dataset ) )
trainer.log_metrics('train', metrics )
trainer.save_metrics('train', metrics )
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
metrics['eval_samples'] = min(max_val_samples, len(eval_dataset ) )
trainer.log_metrics('eval', metrics )
trainer.save_metrics('eval', metrics )
return results
if __name__ == "__main__":
main()