import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask for the encoder blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
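A minimal smoke-test sketch of the encoder above. It assumes the class is in scope inside a diffusers checkout; the hyperparameter values below are illustrative assumptions, not taken from any released config:

import torch

# Illustrative (assumed) hyperparameters for a quick shape check.
encoder = SpectrogramNotesEncoder(
    max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
    num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
)
encoder.eval()

tokens = torch.randint(0, 1536, (1, 2048))
mask = (tokens > 0).long()
with torch.no_grad():
    out, out_mask = encoder(tokens, mask)
print(out.shape)  # expected: torch.Size([1, 2048, 768])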
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are stored lowest degree first: coefficients[i] is the x^i term."""
        if len(coefficients) != degree + 1:
            raise ValueError("The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
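A quick illustrative usage check (not part of the original module). Since coefficients are stored lowest degree first, `Polynomial(2, [1, 2, 3])` represents 3x^2 + 2x + 1:

if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])      # 3x^2 + 2x + 1
    print(p)                          # 3x^2 + 2x + 1
    print(p.evaluate(2))              # 3*4 + 2*2 + 1 = 17
    print(p.derivative())             # 6x + 2
    print(p + Polynomial(1, [0, 1]))  # 3x^2 + 3x + 1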
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
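As an illustration of how the PATTERNS table composes, a hypothetical TF key can be traced through `rename_state_dict_key` (the key below is made up, not from a real checkpoint):

# Tracing "encoder/layer_0/ffn/dense/kernel" through PATTERNS in order:
#   "/"          -> "."         : encoder.layer_0.ffn.dense.kernel
#   "r.layer_"   -> "r.layers." : encoder.layers.0.ffn.dense.kernel
#   "ffn.dense." -> "fc1."      : encoder.layers.0.fc1.kernel
#   "kernel"     -> "weight"    : encoder.layers.0.fc1.weight
assert rename_state_dict_key("encoder/layer_0/ffn/dense/kernel") == "encoder.layers.0.fc1.weight"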
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have some network issues; you can try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """A bunch of argument sanity checks to perform before starting the training."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with the `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A__ = CLIPTextModel(UpperCamelCase )
A__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase ( self: str , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any]=0 ):
"""simple docstring"""
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("""RGB""" )
if str(UpperCamelCase ).startswith("""mps""" ):
A__ = torch.manual_seed(UpperCamelCase )
else:
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
A__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = self.get_dummy_inputs(UpperCamelCase )
A__ = sd_pipe(**UpperCamelCase ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = self.get_dummy_inputs(UpperCamelCase )
A__ = """french fries"""
A__ = sd_pipe(**UpperCamelCase , negative_prompt=UpperCamelCase )
A__ = output.images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = self.get_dummy_inputs(UpperCamelCase )
A__ = [inputs["""prompt"""]] * 2
A__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
A__ = torch.from_numpy(UpperCamelCase ).unsqueeze(0 ).to(UpperCamelCase )
A__ = image / 2 + 0.5
A__ = image.permute(0 , 3 , 1 , 2 )
A__ = image.repeat(2 , 1 , 1 , 1 )
A__ = sd_pipe(**UpperCamelCase ).images
A__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A__ = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
A__ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = self.get_dummy_inputs(UpperCamelCase )
A__ = sd_pipe(**UpperCamelCase ).images
A__ = image[0, -3:, -3:, -1]
A__ = [round(UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(UpperCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self: str ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
A__ = VaeImageProcessor(do_resize=UpperCamelCase , do_normalize=UpperCamelCase )
A__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase , input_image_type="""pt""" ) )[0]
A__ = components["""vae"""]
A__ = self.get_dummy_inputs_by_type(UpperCamelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A__ = vae.encode(inputs[image_param] ).latent_dist.mode()
A__ = pipe(**UpperCamelCase )[0]
A__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[Any] , UpperCamelCase: List[str]=0 ):
"""simple docstring"""
A__ = torch.manual_seed(UpperCamelCase )
A__ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
A__ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase )
A__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase )
A__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = 0
def callback_fn(UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: torch.FloatTensor ) -> None:
A__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A__ = latents[0, -3:, -3:, -1]
A__ = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A__ = latents[0, -3:, -3:, -1]
A__ = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A__ = False
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase , torch_dtype=torch.floataa )
A__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
pipe(**UpperCamelCase , callback=UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase , torch_dtype=torch.floataa )
A__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = inputs["""image"""].resize((5_04, 5_04) )
A__ = """timbrooks/instruct-pix2pix"""
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = pipe(**UpperCamelCase )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
A__ = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[int] , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : List[str] = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_SCREAMING_SNAKE_CASE : Optional[int] = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class a ( unittest.TestCase , __snake_case ):
def UpperCamelCase ( self : int ) -> Optional[int]:
lowerCamelCase_ = load_tool('text-question-answering' )
self.tool.setup()
lowerCamelCase_ = load_tool('text-question-answering' , remote=__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Dict ) -> Tuple:
lowerCamelCase_ = self.tool(__SCREAMING_SNAKE_CASE , 'What did Hugging Face do in April 2021?' )
self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )
def UpperCamelCase ( self : List[Any] ) -> Optional[int]:
lowerCamelCase_ = self.remote_tool(__SCREAMING_SNAKE_CASE , 'What did Hugging Face do in April 2021?' )
self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )
def UpperCamelCase ( self : List[str] ) -> Optional[int]:
lowerCamelCase_ = self.tool(text=__SCREAMING_SNAKE_CASE , question='What did Hugging Face do in April 2021?' )
self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )
def UpperCamelCase ( self : Union[str, Any] ) -> int:
lowerCamelCase_ = self.remote_tool(text=__SCREAMING_SNAKE_CASE , question='What did Hugging Face do in April 2021?' )
self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : List[str] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__UpperCAmelCase : Optional[Any] = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ )
else:
__UpperCAmelCase : int = None
__UpperCAmelCase : Tuple = torch.nn.Parameter(UpperCAmelCase_ )
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
def __init__( self : str , UpperCAmelCase_ : VQModel , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : TransformeraDModel , UpperCAmelCase_ : VQDiffusionScheduler , UpperCAmelCase_ : LearnedClassifierFreeSamplingEmbeddings , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=UpperCAmelCase_ , transformer=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : Dict = len(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else 1
# get prompt text embeddings
__UpperCAmelCase : Optional[Any] = self.tokenizer(
UpperCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__UpperCAmelCase : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCAmelCase : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__UpperCAmelCase : int = text_input_ids[:, : self.tokenizer.model_max_length]
__UpperCAmelCase : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__UpperCAmelCase : Any = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCAmelCase_ )
# duplicate text embeddings for each generation per prompt
__UpperCAmelCase : str = prompt_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__UpperCAmelCase : str = self.learned_classifier_free_sampling_embeddings.embeddings
__UpperCAmelCase : str = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCAmelCase_ , 1 , 1 )
else:
__UpperCAmelCase : int = [""] * batch_size
__UpperCAmelCase : Tuple = text_input_ids.shape[-1]
__UpperCAmelCase : Tuple = self.tokenizer(
UpperCAmelCase_ , padding="max_length" , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" , )
__UpperCAmelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__UpperCAmelCase : Any = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCAmelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__UpperCAmelCase : str = negative_prompt_embeds.repeat(1 , UpperCAmelCase_ , 1 )
__UpperCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCAmelCase : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , ):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : int = 1
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : Any = len(UpperCAmelCase_ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}" )
__UpperCAmelCase : Union[str, Any] = batch_size * num_images_per_prompt
__UpperCAmelCase : str = guidance_scale > 1.0
__UpperCAmelCase : Optional[int] = self._encode_prompt(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(UpperCAmelCase_ )}." )
# get the initial completely masked latents unless the user supplied it
__UpperCAmelCase : str = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__UpperCAmelCase : List[Any] = self.transformer.num_vector_embeds - 1
__UpperCAmelCase : int = torch.full(UpperCAmelCase_ , UpperCAmelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
__UpperCAmelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device )
__UpperCAmelCase : Tuple = self.scheduler.timesteps.to(self.device )
__UpperCAmelCase : Any = latents
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__UpperCAmelCase : int = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__UpperCAmelCase : Any = self.transformer(UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , timestep=UpperCAmelCase_ ).sample
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model_output.chunk(2 )
__UpperCAmelCase : Optional[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCAmelCase_ , dim=1 , keepdim=UpperCAmelCase_ )
__UpperCAmelCase : int = self.truncate(UpperCAmelCase_ , UpperCAmelCase_ )
# remove `log(0)`'s (`-inf`s)
__UpperCAmelCase : Optional[Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = self.vqvae.config.vq_embed_dim
__UpperCAmelCase : Optional[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__UpperCAmelCase : List[str] = self.vqvae.quantize.get_codebook_entry(UpperCAmelCase_ , shape=UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = self.vqvae.decode(UpperCAmelCase_ , force_not_quantize=UpperCAmelCase_ ).sample
__UpperCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase : int = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : float ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : List[str] = torch.sort(UpperCAmelCase_ , 1 , descending=UpperCAmelCase_ )
__UpperCAmelCase : int = torch.exp(UpperCAmelCase_ )
__UpperCAmelCase : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
        __UpperCAmelCase : int = torch.full_like(keep_mask[:, 0:1, :] , True )
__UpperCAmelCase : str = torch.cat((all_true, keep_mask) , dim=1 )
__UpperCAmelCase : List[Any] = keep_mask[:, :-1, :]
__UpperCAmelCase : Any = keep_mask.gather(1 , indices.argsort(1 ) )
__UpperCAmelCase : int = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf # -inf = log(0)
return rv
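# --- Hedged illustration (not the pipeline code above) ---------------------
# A standalone sketch of what `truncate` computes: keep the highest-probability
# entries whose cumulative mass stays below `truncation_rate`, always keep the
# single most likely one, and zero out the rest in log-space. All names here
# are hypothetical.
import torch

def truncate_log_probs(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # shift right so the top entry is always kept
    keep = torch.cat((torch.ones_like(keep[:, :1]), keep[:, :-1]), dim=1)
    keep = keep.gather(1, indices.argsort(1))  # undo the sort
    out = log_p.clone()
    out[~keep] = float("-inf")  # log(0)
    return out

print(truncate_log_probs(torch.log(torch.tensor([[0.5, 0.3, 0.2]])), 0.6))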
| 37 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
def __init__( self : List[Any] , **UpperCAmelCase_ : Dict ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : Tuple ):
"""simple docstring"""
return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
__UpperCAmelCase : Union[str, Any] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
__UpperCAmelCase : int = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}." ):
"""simple docstring"""
__UpperCAmelCase : Tuple = load_image(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
__UpperCAmelCase : Dict = candidate_labels
__UpperCAmelCase : Any = [hypothesis_template.format(UpperCAmelCase_ ) for x in candidate_labels]
__UpperCAmelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = [text_inputs]
return inputs
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = model_inputs.pop("candidate_labels" )
__UpperCAmelCase : str = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , UpperCAmelCase_ ):
__UpperCAmelCase : Tuple = text_inputs[0]
else:
# Batching case.
__UpperCAmelCase : Optional[int] = text_inputs[0][0]
__UpperCAmelCase : Any = self.model(**UpperCAmelCase_ , **UpperCAmelCase_ )
__UpperCAmelCase : Dict = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Dict ):
"""simple docstring"""
__UpperCAmelCase : Any = model_outputs.pop("candidate_labels" )
__UpperCAmelCase : Tuple = model_outputs["logits"][0]
if self.framework == "pt":
__UpperCAmelCase : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
__UpperCAmelCase : Dict = probs.tolist()
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = [scores]
elif self.framework == "tf":
__UpperCAmelCase : Union[str, Any] = stable_softmax(UpperCAmelCase_ , axis=-1 )
__UpperCAmelCase : List[str] = probs.numpy().tolist()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__UpperCAmelCase : Dict = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_ ) , key=lambda x : -x[0] )
]
return result
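# --- Hedged illustration ----------------------------------------------------
# The postprocess step above reduces to a softmax over one logit per candidate
# label followed by a sort. A toy version with made-up logits and labels:
import torch

logits = torch.tensor([2.1, 0.3, -1.0])
labels = ["cat", "dog", "car"]
scores = logits.softmax(dim=-1).tolist()
print(sorted(({"score": s, "label": l} for s, l in zip(scores, labels)),
             key=lambda d: -d["score"]))  # highest-scoring label first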
| 37 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : str = '''markuplm'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : int=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Dict=2_5_6 , SCREAMING_SNAKE_CASE__ : List[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_1_6 , SCREAMING_SNAKE_CASE__ : List[str]=1_0_0_1 , SCREAMING_SNAKE_CASE__ : Tuple=3_2 , SCREAMING_SNAKE_CASE__ : str=5_0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="absolute" , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Any , ) -> Optional[Any]:
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
a_ : List[Any] = vocab_size
a_ : List[Any] = hidden_size
a_ : List[str] = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Tuple = hidden_act
a_ : Dict = intermediate_size
a_ : Dict = hidden_dropout_prob
a_ : Optional[Any] = attention_probs_dropout_prob
a_ : Any = max_position_embeddings
a_ : Union[str, Any] = type_vocab_size
a_ : int = initializer_range
a_ : Dict = layer_norm_eps
a_ : int = position_embedding_type
a_ : Optional[Any] = use_cache
a_ : Optional[int] = classifier_dropout
# additional properties
a_ : Tuple = max_depth
a_ : Union[str, Any] = max_xpath_tag_unit_embeddings
a_ : List[Any] = max_xpath_subs_unit_embeddings
a_ : str = tag_pad_id
a_ : Optional[int] = subs_pad_id
a_ : Optional[int] = xpath_unit_hidden_size
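# --- Hedged usage sketch ------------------------------------------------------
# Instantiating a config like the one above with a couple of overrides; assumes
# the mainline `transformers` package, which exports MarkupLMConfig.
from transformers import MarkupLMConfig

cfg = MarkupLMConfig(max_depth=64, hidden_size=512)
print(cfg.max_depth, cfg.hidden_size, cfg.xpath_unit_hidden_size)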
| 32 |
from __future__ import annotations
UpperCAmelCase_ : Tuple = []
def SCREAMING_SNAKE_CASE_ ( __A : list[list[int]] , __A : int , __A : int ) -> bool:
"""simple docstring"""
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
if board[i][j] == 1:
return False
return True
def SCREAMING_SNAKE_CASE_ ( __A : list[list[int]] , __A : int ) -> bool:
"""simple docstring"""
if row >= len(__A ):
solution.append(__A )
printboard(__A )
print()
return True
for i in range(len(__A ) ):
if is_safe(__A , __A , __A ):
            board[row][i] = 1
            solve(__A , row + 1 )
            board[row][i] = 0
return False
def SCREAMING_SNAKE_CASE_ ( __A : list[list[int]] ) -> None:
"""simple docstring"""
for i in range(len(__A ) ):
for j in range(len(__A ) ):
if board[i][j] == 1:
print('Q' , end=' ' )
else:
print('.' , end=' ' )
print()
# n=int(input("The no. of queens"))
UpperCAmelCase_ : List[str] = 8
UpperCAmelCase_ : str = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
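# --- Hedged illustration ------------------------------------------------------
# The same backtracking search written with attacked-line sets instead of
# rescanning the board on every probe; it counts solutions rather than printing
# boards. An alternative sketch, not the solver above.
def count_queens(n: int) -> int:
    cols, diag_down, diag_up = set(), set(), set()

    def place(row: int) -> int:
        if row == n:
            return 1
        total = 0
        for col in range(n):
            if col in cols or row - col in diag_down or row + col in diag_up:
                continue
            cols.add(col)
            diag_down.add(row - col)
            diag_up.add(row + col)
            total += place(row + 1)
            cols.remove(col)
            diag_down.remove(row - col)
            diag_up.remove(row + col)
        return total

    return place(0)

print(count_queens(8))  # 92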
| 32 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
snake_case = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
snake_case = cvtColor(img, COLOR_BGR2GRAY)
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCamelCase__ ( ):
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
SCREAMING_SNAKE_CASE : int = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def lowerCamelCase__ ( ):
"""simple docstring"""
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
SCREAMING_SNAKE_CASE : Tuple = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def lowerCamelCase__ ( ):
"""simple docstring"""
assert med.median_filter(lowercase , 3 ).any()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def lowerCamelCase__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = bs.Burkes(imread(lowercase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCamelCase__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = rs.NearestNeighbour(imread(lowercase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE : Dict = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE : Optional[Any] = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
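# --- Hedged illustration ------------------------------------------------------
# The idea behind the local-binary-pattern test above: compare each of the
# eight neighbours against the centre pixel and pack the comparisons into one
# byte. `lbp_value` is a hypothetical helper, not the module under test.
import numpy as np

def lbp_value(img: np.ndarray, x: int, y: int) -> int:
    center = img[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
               (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        in_bounds = 0 <= nx < img.shape[0] and 0 <= ny < img.shape[1]
        if in_bounds and img[nx, ny] >= center:
            value |= 1 << bit
    return value

print(lbp_value(np.arange(9).reshape(3, 3), 1, 1))  # bits set for neighbours >= 4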
| 319 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''ClapFeatureExtractor'''
UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if audios is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor(
UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
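# --- Hedged usage sketch ------------------------------------------------------
# Driving a processor like the one above; the checkpoint id, the silent audio
# array, and the exact output keys are assumptions based on the mainline
# `transformers` CLAP integration, not guaranteed by the snippet itself.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
inputs = processor(
    text=["a dog barking"],
    audios=[np.zeros(48_000, dtype=np.float32)],
    sampling_rate=48_000,
    return_tensors="pt",
)
print(sorted(inputs.keys()))  # expected: attention_mask, input_features, input_ids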
| 319 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = """▁"""
__snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__snake_case = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__snake_case = {
"""google/pegasus-xsum""": 512,
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = PegasusTokenizer
__UpperCAmelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<pad>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<mask_2>" , UpperCamelCase__="<mask_1>" , UpperCamelCase__=None , UpperCamelCase__=103 , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
snake_case : str = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError(
F'additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is'
F' {type(UpperCamelCase__ )}' )
snake_case : int = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(UpperCamelCase__ ) , self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
snake_case : Optional[int] = additional_special_tokens_extended
else:
snake_case : Optional[int] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )]
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , pad_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , mask_token_sent=UpperCamelCase__ , offset=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case : Tuple = vocab_file
snake_case : List[str] = False if not self.vocab_file else True
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
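# --- Hedged illustration ------------------------------------------------------
# The special-token mask above boils down to a set-membership test: ids in the
# special set map to 1, ordinary tokens to 0 (toy id values below).
special_ids = {0, 1, 2}  # e.g. pad, eos and mask ids in a real vocab
seq = [0, 57, 43, 2]
print([1 if x in special_ids else 0 for x in seq])  # [1, 0, 0, 1]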
| 203 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=12 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=0 , UpperCamelCase__=None , ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Dict = batch_size
snake_case : List[str] = seq_length
snake_case : Dict = is_training
snake_case : Optional[Any] = use_input_mask
snake_case : Optional[int] = use_labels
snake_case : Tuple = vocab_size
snake_case : Optional[Any] = hidden_size
snake_case : Optional[Any] = projection_dim
snake_case : List[Any] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : int = intermediate_size
snake_case : str = dropout
snake_case : List[Any] = attention_dropout
snake_case : Any = max_position_embeddings
snake_case : List[Any] = initializer_range
snake_case : Any = scope
snake_case : Union[str, Any] = bos_token_id
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : int = None
if self.use_input_mask:
snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
snake_case : Tuple = input_mask.numpy()
snake_case ,snake_case : str = input_mask.shape
snake_case : Tuple = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase__ ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
snake_case : Union[str, Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase__ )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = TFBlipTextModel(config=UpperCamelCase__ )
snake_case : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , training=UpperCamelCase__ )
snake_case : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case : Tuple = config_and_inputs
snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Any = (TFBlipTextModel,) if is_tf_available() else ()
__UpperCAmelCase : Any = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = BlipTextModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@slow
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[str] = TFBlipTextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__=True ) -> Optional[int]:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase__ )
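# --- Hedged illustration ------------------------------------------------------
# What the mask surgery in prepare_config_and_inputs above does: pick a random
# split per row so every example attends to a non-empty prefix and masks the
# rest. Shapes and values here are made up.
import numpy as np

batch_size, seq_length = 2, 7
mask = np.ones((batch_size, seq_length), dtype=np.int64)
starts = np.random.randint(1, seq_length - 1, size=(batch_size,))
for row, start in enumerate(starts):
    mask[row, :start] = 1
    mask[row, start:] = 0
print(mask.sum(axis=1))  # per-row count of attended positions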
| 203 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class _SCREAMING_SNAKE_CASE( A ):
def __init__( self ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
if config is None:
assert isinstance(self.model ,SCREAMING_SNAKE_CASE__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__SCREAMING_SNAKE_CASE :Optional[Any] = self.model.config
else:
__SCREAMING_SNAKE_CASE :Any = config
__SCREAMING_SNAKE_CASE :str = data_args
__SCREAMING_SNAKE_CASE :Tuple = self.config.tgt_vocab_size if isinstance(self.config ,SCREAMING_SNAKE_CASE__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
''' padding..''' )
if self.args.label_smoothing == 0:
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__SCREAMING_SNAKE_CASE :Any = label_smoothed_nll_loss
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
if self.optimizer is None:
__SCREAMING_SNAKE_CASE :str = ['''bias''', '''LayerNorm.weight''']
__SCREAMING_SNAKE_CASE :Union[str, Any] = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
__SCREAMING_SNAKE_CASE :int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__SCREAMING_SNAKE_CASE :List[str] = Adafactor
__SCREAMING_SNAKE_CASE :List[Any] = {'''scale_parameter''': False, '''relative_step''': False}
else:
__SCREAMING_SNAKE_CASE :List[str] = AdamW
__SCREAMING_SNAKE_CASE :Tuple = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
__SCREAMING_SNAKE_CASE :List[str] = self.args.learning_rate
if self.sharded_ddp:
__SCREAMING_SNAKE_CASE :Dict = OSS(
params=SCREAMING_SNAKE_CASE__ ,optim=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
else:
__SCREAMING_SNAKE_CASE :str = optimizer_cls(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
if self.lr_scheduler is None:
__SCREAMING_SNAKE_CASE :List[Any] = self._get_lr_scheduler(SCREAMING_SNAKE_CASE__ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__SCREAMING_SNAKE_CASE :Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__SCREAMING_SNAKE_CASE :int = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=SCREAMING_SNAKE_CASE__ )
return scheduler
def _UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]:
"""simple docstring"""
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ,use_cache=SCREAMING_SNAKE_CASE__ )[0]
__SCREAMING_SNAKE_CASE :Optional[int] = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,use_cache=SCREAMING_SNAKE_CASE__ )[:2]
else:
# compute label smoothed loss
__SCREAMING_SNAKE_CASE :str = model(**SCREAMING_SNAKE_CASE__ ,use_cache=SCREAMING_SNAKE_CASE__ )[0]
__SCREAMING_SNAKE_CASE :Optional[int] = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE__ ,dim=-1 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.loss_fn(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = inputs.pop('''labels''' )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[str] = self._compute_loss(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return loss
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self._prepare_inputs(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__SCREAMING_SNAKE_CASE :int = self.model.generate(
inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,**SCREAMING_SNAKE_CASE__ ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__SCREAMING_SNAKE_CASE :int = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE__ ,gen_kwargs['''max_length'''] )
__SCREAMING_SNAKE_CASE :Dict = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = self._compute_loss(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__SCREAMING_SNAKE_CASE :List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__SCREAMING_SNAKE_CASE :Optional[int] = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE__ ,gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f''' padded to `max_length`={max_length}''' )
__SCREAMING_SNAKE_CASE :Optional[int] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 239 |
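# --- Hedged illustration ------------------------------------------------------
# The optimizer grouping used in the trainer row above (before the marker):
# parameters whose names match `no_decay` get weight_decay=0.0. The toy module
# is hypothetical; HF models really do expose names like "LayerNorm.weight".
import torch

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = torch.nn.Linear(4, 4)
        self.LayerNorm = torch.nn.LayerNorm(4)

model = Toy()
no_decay = ["bias", "LayerNorm.weight"]
groups = [
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(groups, lr=3e-4)
print([len(g["params"]) for g in groups])  # [1, 3]: only dense.weight decays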
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''xlm-prophetnet'''
SCREAMING_SNAKE_CASE_ : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = "gelu" ,SCREAMING_SNAKE_CASE__ = 3_05_22 ,SCREAMING_SNAKE_CASE__ = 10_24 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 5_12 ,SCREAMING_SNAKE_CASE__ = 0.0_2 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 32 ,SCREAMING_SNAKE_CASE__ = 1_28 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = 2 ,**SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = vocab_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_size
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :Optional[int] = num_encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = num_encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_decoder_layers
__SCREAMING_SNAKE_CASE :Optional[Any] = num_decoder_attention_heads
__SCREAMING_SNAKE_CASE :List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Dict = init_std # Normal(0, this parameter)
__SCREAMING_SNAKE_CASE :List[Any] = activation_function
# parameters for xlmprophetnet
__SCREAMING_SNAKE_CASE :Tuple = ngram
__SCREAMING_SNAKE_CASE :int = num_buckets
__SCREAMING_SNAKE_CASE :Optional[int] = relative_max_distance
__SCREAMING_SNAKE_CASE :Union[str, Any] = disable_ngram_loss
__SCREAMING_SNAKE_CASE :Dict = eps
# 3 Types of Dropout
__SCREAMING_SNAKE_CASE :List[str] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Union[str, Any] = dropout
__SCREAMING_SNAKE_CASE :int = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,add_cross_attention=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
| 239 | 1 |
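# --- Hedged usage sketch ------------------------------------------------------
# The read-only `num_hidden_layers` property above sums encoder and decoder
# depths, and assigning to it raises. Assumes the mainline `transformers`
# package, which exports XLMProphetNetConfig.
from transformers import XLMProphetNetConfig

cfg = XLMProphetNetConfig(num_encoder_layers=2, num_decoder_layers=3)
print(cfg.num_hidden_layers)  # 5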
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A ( a_ ,a_=0.999 ,a_="cosine" ,) -> List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(a_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__UpperCamelCase : List[Any] =[]
for i in range(a_ ):
__UpperCamelCase : List[str] =i / num_diffusion_timesteps
__UpperCamelCase : Optional[int] =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) ,a_ ) )
return torch.tensor(a_ ,dtype=torch.floataa )
class __A ( a , a ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =[e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase__ : Optional[int] =2
@register_to_config
def __init__( self , lowerCamelCase__ = 1000 , lowerCamelCase__ = 0.00_085 , lowerCamelCase__ = 0.012 , lowerCamelCase__ = "linear" , lowerCamelCase__ = None , lowerCamelCase__ = "epsilon" , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 1.0 , lowerCamelCase__ = "linspace" , lowerCamelCase__ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
__UpperCamelCase : Optional[int] =torch.tensor(lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCamelCase : str =torch.linspace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCamelCase : Optional[Any] =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCamelCase : Optional[int] =betas_for_alpha_bar(lowerCamelCase__ , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
__UpperCamelCase : str =betas_for_alpha_bar(lowerCamelCase__ , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
__UpperCamelCase : Union[str, Any] =1.0 - self.betas
__UpperCamelCase : str =torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =use_karras_sigmas
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
if schedule_timesteps is None:
__UpperCamelCase : Union[str, Any] =self.timesteps
__UpperCamelCase : Tuple =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCamelCase : Tuple =1 if len(lowerCamelCase__ ) > 1 else 0
else:
__UpperCamelCase : Union[str, Any] =timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep
__UpperCamelCase : List[str] =self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.index_for_timestep(lowerCamelCase__ )
__UpperCamelCase : List[str] =self.sigmas[step_index]
__UpperCamelCase : Optional[Any] =sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
"""simple docstring"""
__UpperCamelCase : List[str] =num_inference_steps
__UpperCamelCase : Union[str, Any] =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCamelCase : Dict =np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase__ , dtype=lowerCamelCase__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCamelCase : List[str] =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCamelCase : List[str] =(np.arange(0 , lowerCamelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCamelCase : Optional[Any] =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCamelCase : Any =(np.arange(lowerCamelCase__ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase__ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__UpperCamelCase : List[Any] =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCamelCase : int =np.log(lowerCamelCase__ )
__UpperCamelCase : str =np.interp(lowerCamelCase__ , np.arange(0 , len(lowerCamelCase__ ) ) , lowerCamelCase__ )
if self.config.use_karras_sigmas:
__UpperCamelCase : Optional[Any] =self._convert_to_karras(in_sigmas=lowerCamelCase__ , num_inference_steps=self.num_inference_steps )
__UpperCamelCase : List[Any] =np.array([self._sigma_to_t(lowerCamelCase__ , lowerCamelCase__ ) for sigma in sigmas] )
__UpperCamelCase : List[Any] =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCamelCase : List[str] =torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCamelCase : List[Any] =torch.from_numpy(lowerCamelCase__ )
__UpperCamelCase : str =torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCamelCase__ ).startswith('mps' ):
# mps does not support float64
__UpperCamelCase : Optional[int] =timesteps.to(lowerCamelCase__ , dtype=torch.floataa )
else:
__UpperCamelCase : List[Any] =timesteps.to(device=lowerCamelCase__ )
# empty dt and derivative
__UpperCamelCase : Dict =None
__UpperCamelCase : Optional[Any] =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCamelCase : List[str] =defaultdict(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =np.log(lowerCamelCase__ )
# get distribution
__UpperCamelCase : Any =log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__UpperCamelCase : Any =np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__UpperCamelCase : Optional[int] =low_idx + 1
__UpperCamelCase : Optional[int] =log_sigmas[low_idx]
__UpperCamelCase : Optional[int] =log_sigmas[high_idx]
# interpolate sigmas
__UpperCamelCase : Any =(low - log_sigma) / (low - high)
__UpperCamelCase : int =np.clip(lowerCamelCase__ , 0 , 1 )
# transform interpolation to time range
__UpperCamelCase : Tuple =(1 - w) * low_idx + w * high_idx
__UpperCamelCase : Optional[int] =t.reshape(sigma.shape )
return t
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : float =in_sigmas[-1].item()
__UpperCamelCase : float =in_sigmas[0].item()
__UpperCamelCase : Dict =7.0 # 7.0 is the value used in the paper
__UpperCamelCase : str =np.linspace(0 , 1 , lowerCamelCase__ )
__UpperCamelCase : int =sigma_min ** (1 / rho)
__UpperCamelCase : Tuple =sigma_max ** (1 / rho)
__UpperCamelCase : Dict =(max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self ):
"""simple docstring"""
return self.dt is None
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.index_for_timestep(lowerCamelCase__ )
# advance index counter by 1
__UpperCamelCase : Optional[int] =timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCamelCase : List[str] =self.sigmas[step_index]
__UpperCamelCase : Tuple =self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__UpperCamelCase : Union[str, Any] =self.sigmas[step_index - 1]
__UpperCamelCase : int =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCamelCase : Any =0
__UpperCamelCase : Union[str, Any] =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCamelCase : Optional[int] =sigma_hat if self.state_in_first_order else sigma_next
__UpperCamelCase : Tuple =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCamelCase : Dict =sigma_hat if self.state_in_first_order else sigma_next
__UpperCamelCase : Union[str, Any] =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__UpperCamelCase : Dict =model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
__UpperCamelCase : Any =pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCamelCase : int =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCamelCase : List[str] =sigma_next - sigma_hat
# store for 2nd order step
__UpperCamelCase : Optional[Any] =derivative
__UpperCamelCase : Optional[Any] =dt
__UpperCamelCase : Optional[int] =sample
else:
# 2. 2nd order / Heun's method
__UpperCamelCase : Any =(sample - pred_original_sample) / sigma_next
__UpperCamelCase : List[str] =(self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__UpperCamelCase : Optional[Any] =self.dt
__UpperCamelCase : Union[str, Any] =self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__UpperCamelCase : Optional[Any] =None
__UpperCamelCase : Union[str, Any] =None
__UpperCamelCase : str =None
__UpperCamelCase : str =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase__ ):
# mps does not support float64
__UpperCamelCase : Tuple =self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCamelCase : Tuple =timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCamelCase : Optional[Any] =self.timesteps.to(original_samples.device )
__UpperCamelCase : Tuple =timesteps.to(original_samples.device )
__UpperCamelCase : List[str] =[self.index_for_timestep(lowerCamelCase__ , lowerCamelCase__ ) for t in timesteps]
__UpperCamelCase : Optional[int] =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCamelCase : List[str] =sigma.unsqueeze(-1 )
__UpperCamelCase : Tuple =original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
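# --- Hedged illustration ------------------------------------------------------
# A standalone version of the Karras et al. (2022) sigma schedule computed in
# `_convert_to_karras` above: interpolate between sigma_max and sigma_min in
# rho-space with rho = 7.0, as in the paper and the snippet.
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 10.0, 5))  # decreasing noise levels from 10.0 down to 0.1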
| 71 |
"""simple docstring"""
import os
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = os.path.dirname(os.path.realpath(__UpperCamelCase ) )
lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase , "triangle.txt" )
with open(__UpperCamelCase ) as f:
lowerCAmelCase_ : Optional[int] = f.readlines()
lowerCAmelCase_ : Union[str, Any] = []
for line in triangle:
lowerCAmelCase_ : Any = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(__UpperCamelCase ) )
a.append(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
for j in range(len(a[i] ) ):
lowerCAmelCase_ : Optional[Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCAmelCase_ : int = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCamelCase , __UpperCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
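# --- Hedged illustration ------------------------------------------------------
# The same dynamic programme on a tiny inline triangle (no file needed): each
# cell absorbs the larger of its two parents, so the last row ends up holding
# complete path sums and its maximum is the answer.
rows = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(rows)):
    for j in range(len(rows[i])):
        from_right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
        from_left = rows[i - 1][j - 1] if j > 0 else 0
        rows[i][j] += max(from_right, from_left)
print(max(rows[-1]))  # 23 for this classic example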
| 241 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BioGptTokenizer
lowercase__ = False
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowercase_ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
lowercase_ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""") as fp:
fp.write(json.dumps(lowerCAmelCase_))
with open(self.merges_file , """w""") as fp:
fp.write("""\n""".join(lowerCAmelCase_))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = """lower newer"""
lowercase_ = """lower newer"""
return input_text, output_text
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = BioGptTokenizer(self.vocab_file , self.merges_file)
lowercase_ = """lower"""
lowercase_ = ["""low""", """er</w>"""]
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokens + ["""<unk>"""]
lowercase_ = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""")
lowercase_ = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_)
lowercase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
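# --- Hedged illustration ------------------------------------------------------
# A minimal BPE merge loop reproducing the behaviour the test above asserts:
# with merges "l o", "lo w" and "e r</w>", the word "lower" greedily becomes
# ["low", "er</w>"]. `bpe` is a toy helper, not the tokenizer under test.
def bpe(word: str, ranks: dict) -> list:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while True:
        candidates = {
            (a, b): ranks[f"{a} {b}"]
            for a, b in zip(symbols, symbols[1:])
            if f"{a} {b}" in ranks
        }
        if not candidates:
            return symbols
        a, b = min(candidates, key=candidates.get)
        merged, i = [], 0
        while i < len(symbols):
            if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == (a, b):
                merged.append(a + b)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged

print(bpe("lower", {"l o": 0, "lo w": 1, "e r</w>": 2}))  # ['low', 'er</w>']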
| 363 |
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
lowercase_ = 0 if allow_empty_subarrays else float("""-inf""" )
lowercase_ = 0.0
for num in arr:
lowercase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 313 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
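

if __name__ == "__main__":
    # Hypothetical usage sketch (added illustration, not part of the test
    # suite): turning the logits the integration tests check above into a
    # per-pixel class map.
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, num_labels, 512, 512)
    pred_map = logits.argmax(dim=1)[0]  # per-pixel ADE20k class ids
    print(pred_map.shape, pred_map.unique()[:10])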
| 229 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        # helper name reconstructed; it mirrors the records above as columns
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
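

if __name__ == "__main__":
    # Minimal usage sketch (added illustration, not part of the original tests):
    # `from_list` infers the schema from the first record, as exercised above.
    ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    print(ds.column_names)  # ['col_1', 'col_2']
    print(ds[0])            # {'col_1': 3, 'col_2': 'a'}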
| 229 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
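

if __name__ == "__main__":
    # Quick sketch (added illustration; downloads a checkpoint): the wrappers
    # behave exactly like the corresponding Auto* classes.
    tok = tokenizer("bert-base-uncased")
    print(tok.tokenize("Hello world!"))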
| 61 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
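

if __name__ == "__main__":
    # Hypothetical end-to-end sketch (added illustration, not part of the
    # original tests): build the processor from its three components, using the
    # same tiny fixture checkpoints as setUp, and inspect the joint output keys.
    proc = InstructBlipProcessor(
        BlipImageProcessor(),
        GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
        BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
    )
    img = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    print(sorted(proc(text="lower newer", images=img).keys()))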
| 61 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
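

if __name__ == "__main__":
    # Hypothetical wiring sketch (added illustration, not from the original
    # file; downloads a large index): spin up two Ray retrieval actors and
    # build the distributed retriever around them.
    ray.init()
    workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
    retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", actor_handles=workers)
    retriever.init_retrieval()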
| 335 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 1 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights to our BERT structure."""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
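
# Example invocation (paths are placeholders, added for illustration):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-converted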
| 362 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 324 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])

| 262 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
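

if __name__ == "__main__":
    # Illustration (added sketch, not part of the original module): the defaults
    # reproduce the base ViT-MAE sizes, with an asymmetric 512-dim decoder.
    cfg = ViTMAEConfig()
    print(cfg.hidden_size, cfg.decoder_hidden_size, cfg.mask_ratio)  # 768 512 0.75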
| 223 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
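

if __name__ == "__main__":
    # Hypothetical usage sketch (added illustration, not part of the original
    # tests): print the top-1 label from the distilled DeiT checkpoint that the
    # integration test above exercises.
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])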
| 164 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Ordinary least squares on [1, date, match] features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
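

# Worked illustration (added note, not from the original file): with votes
# [2.05, 1.95] against an actual value of 2.0, the first vote overshoots
# (safe becomes not_safe + 1 = 1) and the second lands within the 0.1
# tolerance (safe becomes 2), so data_safety_checker returns True.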
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 164 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dimensions where known."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
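

if __name__ == "__main__":
    # Quick sanity sketch (added illustration, not part of the original module):
    # shape_list returns plain ints for fully static shapes, and stable_softmax
    # only shifts logits by 1e-9 before the regular softmax.
    x = tf.random.uniform((2, 3, 4))
    print(shape_list(x))  # [2, 3, 4]
    print(float(tf.reduce_max(tf.abs(stable_softmax(x) - tf.nn.softmax(x)))))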
| 83 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
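# Illustrative invocation (script and checkpoint names are examples, not fixed):
#   python create_model.py --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2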
if __name__ == "__main__":
main()
| 83 | 1 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
    fail_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
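# The list above mixes bare store-true style flags (`--do_train`, `--do_predict`) with
# `--flag value` pairs; `_convert_nargs_to_dict` cannot parse that shape, which is
# exactly what the ValueError test below exercises.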
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 364 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
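# Minimal usage sketch (assumes a transformers version that ships the Nezha model classes):
#   from transformers import NezhaModel
#   model = NezhaModel(NezhaConfig(num_hidden_layers=2))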
| 251 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
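# The tester above follows the usual `transformers` pattern: it builds a tiny random
# config/input pair so the shared ModelTesterMixin checks below run quickly on CPU.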
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """RegNet does not use input_ids/attention_mask, so several common tests are disabled below."""

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 257 | import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
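# Illustrative invocation (script name and paths are examples):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text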
if __name__ == "__main__":
main()
| 65 | 0 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 370 |
'''simple docstring'''
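# Project Euler 87: count the numbers below fifty million that can be written as the
# sum of a prime square, a prime cube, and a prime fourth power. A set is used because
# different prime triples can produce the same total.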
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f'{solution() = }')
| 164 | 0 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False):
    # Stochastic depth: randomly zero the whole residual branch, per sample.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Constructs patch embeddings with a strided convolution."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the module models only the pooled residual.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
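# Minimal inference sketch (mirrors the checkpoint named in the docstrings above):
#   from transformers import PoolFormerImageProcessor
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits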
| 34 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 179 | 0 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
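# Hedged usage sketch (file names are illustrative):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(files=["corpus.txt"], vocab_size=8000)
#   tokenizer.save("tokenizer.json")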
| 107 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
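# Project Euler 551 ("Sum of digits sequence"): a(1) = 1 and a(n) = a(n-1) + digitsum(a(n-1));
# the goal is a(10**15). Each term is decomposed as a(i) = b * 10**k + c, and `memo` caches
# "jumps" keyed by digitsum(b) and c so huge stretches of the sequence can be skipped.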
def next_term(a_i, k, i, n):
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 107 | 1 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            # The smallest element itself acts as the priority.
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 185 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_MASK = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
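# Quick check (downloads the pretrained files from the Hub):
#   tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tok("Hello world")["input_ids"]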
| 185 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : Any ) -> str:
return pformat(self.adj_list )
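# Quick sanity check (directed by default; `add_edge` returns self, so calls chain):
#   graph = GraphAdjacencyList[int]()
#   graph.add_edge(0, 1).add_edge(1, 2)
#   print(graph)  # {0: [1], 1: [2], 2: []}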
| 120 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever of force, area, or distance is passed as 0 in the Casimir equation."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
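
# Minimal usage sketch (illustrative values, not from the original docstring):
# exactly one of the three arguments must be 0, and the function solves for
# that quantity from the Casimir relation F = ℏ c π² A / (240 d⁴).
#   casimir_force(force=0, area=4.0, distance=0.0027)
#   # -> {"force": <computed magnitude in newtons>}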
| 120 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(f"""Generating {path}""")
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
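# The script writes one README.md per language pair under
# <repo_root>/model_cards/facebook/wmt19-<src>-<tgt>/ when run from inside a
# transformers checkout (the repo root is inferred from this file's location).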
| 249 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase =StableDiffusionDiffEditPipeline
UpperCamelCase =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCamelCase =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCamelCase =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase =frozenset([] )
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
        __lowercase : Optional[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowercase : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
__lowercase : Optional[int] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_zero=UpperCamelCase_ , )
torch.manual_seed(0 )
__lowercase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowercase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
__lowercase : Optional[int] = CLIPTextModel(UpperCamelCase_ )
__lowercase : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=0 ) -> Any:
__lowercase : Any = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowercase : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('''mps''' ):
__lowercase : List[Any] = torch.manual_seed(UpperCamelCase_ )
else:
__lowercase : Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowercase : Any = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=0 ) -> int:
__lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowercase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowercase : List[Any] = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('''RGB''' )
if str(UpperCamelCase_ ).startswith('''mps''' ):
__lowercase : List[str] = torch.manual_seed(UpperCamelCase_ )
else:
__lowercase : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowercase : int = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=0 ) -> Union[str, Any]:
__lowercase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowercase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowercase : Any = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('''RGB''' )
if str(UpperCamelCase_ ).startswith('''mps''' ):
__lowercase : Optional[Any] = torch.manual_seed(UpperCamelCase_ )
else:
__lowercase : int = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowercase : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
__lowercase : Optional[int] = self.get_dummy_components()
__lowercase : List[str] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__lowercase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ )
__lowercase : Any = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
__lowercase : Tuple = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase : List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
__lowercase : Any = pipe_loaded(**UpperCamelCase_ )[0]
__lowercase : Any = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase_ , 1E-4 )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : int = '''cpu'''
__lowercase : Optional[int] = self.get_dummy_components()
__lowercase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowercase : str = self.get_dummy_mask_inputs(UpperCamelCase_ )
__lowercase : int = pipe.generate_mask(**UpperCamelCase_ )
__lowercase : Any = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__lowercase : List[Any] = np.array([0] * 9 )
__lowercase : str = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowerCamelCase ( self ) -> str:
__lowercase : Optional[int] = '''cpu'''
__lowercase : Dict = self.get_dummy_components()
__lowercase : str = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowercase : int = self.get_dummy_inversion_inputs(UpperCamelCase_ )
__lowercase : List[str] = pipe.invert(**UpperCamelCase_ ).images
__lowercase : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__lowercase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
__lowercase : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _lowerCamelCase ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _lowerCamelCase ( self ) -> str:
__lowercase : Union[str, Any] = '''cpu'''
__lowercase : str = self.get_dummy_components()
__lowercase : Any = {'''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''beta_schedule''': '''scaled_linear'''}
__lowercase : str = DPMSolverMultistepScheduler(**UpperCamelCase_ )
__lowercase : List[str] = DPMSolverMultistepInverseScheduler(**UpperCamelCase_ )
__lowercase : int = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowercase : str = self.get_dummy_inversion_inputs(UpperCamelCase_ )
__lowercase : str = pipe.invert(**UpperCamelCase_ ).images
__lowercase : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__lowercase : Union[str, Any] = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
__lowercase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowerCamelCase ( cls ) -> Optional[Any]:
__lowercase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
__lowercase : Optional[int] = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
__lowercase : Any = raw_image
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : str = torch.manual_seed(0 )
__lowercase : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
__lowercase : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
__lowercase : Dict = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowercase : Tuple = '''a bowl of fruit'''
__lowercase : int = '''a bowl of pears'''
__lowercase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase_ , target_prompt=UpperCamelCase_ , generator=UpperCamelCase_ , )
__lowercase : Dict = pipe.invert(
prompt=UpperCamelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase_ ).latents
__lowercase : Optional[int] = pipe(
prompt=UpperCamelCase_ , mask_image=UpperCamelCase_ , image_latents=UpperCamelCase_ , generator=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
__lowercase : int = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Union[str, Any] = torch.manual_seed(0 )
__lowercase : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
__lowercase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowercase : List[str] = '''a bowl of fruit'''
__lowercase : Union[str, Any] = '''a bowl of pears'''
__lowercase : int = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase_ , target_prompt=UpperCamelCase_ , generator=UpperCamelCase_ , )
__lowercase : List[Any] = pipe.invert(
prompt=UpperCamelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase_ , num_inference_steps=25 , ).latents
__lowercase : Optional[int] = pipe(
prompt=UpperCamelCase_ , mask_image=UpperCamelCase_ , image_latents=UpperCamelCase_ , generator=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
__lowercase : Union[str, Any] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 249 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask | 364 |
'''simple docstring'''

from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
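
# generate_all_permutations(["A", "B", "C"]) prints the 3! = 6 orderings in the
# order the depth-first search discovers them:
#   ['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'],
#   ['B', 'C', 'A'], ['C', 'A', 'B'], ['C', 'B', 'A']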
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0

        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
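
# Minimal usage sketch: iterating yields the total of all node values.
#   root = Node(10)
#   root.left = Node(5)
#   root.right = Node(-3)
#   next(iter(BinaryTreeNodeSum(root)))  # -> 12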
| 325 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
        __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
__lowercase = [tf.convert_to_tensor(_UpperCAmelCase )]
__lowercase = [torch.tensor(_UpperCAmelCase )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
| 325 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = BlipImageProcessor()
        UpperCamelCase = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
UpperCamelCase = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
UpperCamelCase = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def A ( self : List[Any] , **UpperCamelCase__ : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
def A ( self : Dict , **UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
def A ( self : int , **UpperCamelCase__ : Any ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer
def A ( self : List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : Dict ):
"""simple docstring"""
        UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
UpperCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
UpperCamelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
UpperCamelCase = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 249 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 249 | 1 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 14 |
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform a depth-first-search based topological sort starting from ``start``."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
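    # Prints ['c', 'd', 'e', 'b', 'a']: each vertex appears only after every
    # vertex reachable from it, i.e. a reversed topological ordering.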
| 169 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
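# lucas_lehmer_test(p) decides primality of the Mersenne number 2**p - 1:
# p = 7  -> 2**7 - 1 = 127 is prime,       so the first call prints True
# p = 11 -> 2**11 - 1 = 2047 = 23 * 89,    so the second call prints False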
| 360 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
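
# Worked example on a synthetic 3x3 input (no image file required): dilating a
# single centre pixel with the cross-shaped kernel paints the full cross.
#   img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#   cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#   dilation(img, cross)
#   # -> [[0, 1, 0],
#   #     [1, 1, 1],
#   #     [0, 1, 0]]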
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 140 | 0 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """simple docstring"""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
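
# The tests below rely on the block-determinant identity
#   det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B),
# i.e. the determinant of the full matrix factors through the Schur complement.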
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            # hedged reconstruction of the lost argument order: passing C in
            # B's slot makes the A/B row check (3 rows vs 2) fire
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 51 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        patch_size=1_6,
        qkv_bias=True,
        frequency_stride=1_0,
        time_stride=1_0,
        max_length=1_0_2_4,
        num_mel_bins=1_2_8,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
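
# Example (hedged: mirrors the defaults above rather than any released checkpoint):
#   config = ASTConfig(max_length=1024, num_mel_bins=128)
# describes the 16x16 patch grid, with frequency/time strides of 10, that an AST
# model would place over a 128-bin, 1024-frame log-mel spectrogram.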
| 254 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(
    graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """simple docstring"""

    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)

    return order


def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """simple docstring"""

    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """simple docstring"""

    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
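
# For the fixtures above (hedged: the module itself never invokes the function):
#   strongly_connected_components(test_graph_1)
# groups {0, 1, 2} into one component (the 0 -> 2 -> 1 -> 0 cycle) and leaves
# {3} and {4} as singletons; in test_graph_2 the cycle 0 -> 1 -> 2 -> 0 forms
# one component and 3 -> 4 -> 5 -> 3 another.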
| 254 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), )
    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
        }
| 119 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase_ = datasets.logging.get_logger(__name__)
lowerCamelCase_ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowerCamelCase_ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowerCamelCase_ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt20-comet-da` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 79 | 0 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to the [0, 1] range
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
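A short usage sketch for the two helpers above; the normalized values follow directly from the min-max formula, and the standardized list has zero mean and unit sample standard deviation:

if __name__ == "__main__":  # illustrative driver, not part of the original module
    data = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]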
| 18 | '''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
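The try/except above is diffusers' optional-dependency guard: when torch or transformers is missing, a dummy object that raises a helpful error at instantiation time is exported instead of the real pipeline. A generic sketch of the same pattern; `my_backend`, `RealThing`, and `DummyThing` are made-up names for illustration, not diffusers API:

import importlib.util

def is_my_backend_available() -> bool:
    # Hypothetical availability probe, mirroring is_torch_available().
    return importlib.util.find_spec("my_backend") is not None

class DummyThing:
    # Stand-in that fails loudly only when someone actually tries to use it.
    def __init__(self, *args, **kwargs):
        raise ImportError("RealThing requires the `my_backend` package; please install it.")

if is_my_backend_available():
    from my_backend import RealThing  # noqa: F401
else:
    RealThing = DummyThing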
| 18 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
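For reference, `patch_environment` (used around every launch above) is accelerate's context manager that upper-cases its keyword names and sets them as environment variables for the duration of the block. A tiny self-contained check; the cleanup behaviour on exit may vary slightly across accelerate versions:

import os
from accelerate.utils import patch_environment

with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
    print(os.environ["OMP_NUM_THREADS"])       # "1"
    print(os.environ["CUDA_VISIBLE_DEVICES"])  # "0,1"
# outside the block the patched variables are removed again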
| 253 | '''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True) | 145 | 0 |
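To make the intent of these fixtures concrete, here is a minimal sketch of the API they exercise; it reuses the constants defined above and should raise nothing for the well-formed README:

readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
readme.validate()  # raises ValueError when the structure spec is violated
print(readme.to_dict()["subsections"][0]["name"])  # "Dataset Card for My Dataset"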
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
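An illustrative trace of the bottom-up passes for an eight-element input, followed by the special-case cross merge of the two remaining halves:

#   p=2: sorted pairs              -> [4, 9] [1, 5] [2, 7] [3, 8]
#   p=4: sorted runs of four       -> [1, 4, 5, 9] [2, 3, 7, 8]
#   final merge of the two halves  -> fully sorted
print(iter_merge_sort([9, 4, 5, 1, 7, 2, 8, 3]))  # [1, 2, 3, 4, 5, 7, 8, 9]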
| 364 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda example, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
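A small hand-checkable example of the token/Jaccard helpers above, with two near-duplicate snippets that differ only in parameter names:

code_a = "def add(a, b): return a + b"
code_b = "def add(x, y): return x + y"
print(get_tokens(code_a))                            # {'def', 'add', 'a', 'b', 'return'}
print(round(jaccard_similarity(code_a, code_b), 2))  # 3 shared / 7 total tokens = 0.43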
| 26 | 0 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
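A worked example for `present_value`: an initial outlay of 1000 followed by two inflows of 600, discounted at 10%:

#   -1000/1.1**0 + 600/1.1**1 + 600/1.1**2 = -1000 + 545.45 + 495.87 = 41.32
print(present_value(0.10, [-1000.0, 600.0, 600.0]))  # 41.32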
| 142 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_UpperCAmelCase : Tuple = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def __lowerCamelCase ( cls : Optional[Any] ) ->Dict:
lowerCamelCase__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowerCamelCase__ : int = 1
return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['''this is gunna be a long sentence ''' * 2_0]
        assert isinstance(src_text[0], str)
        desired_max_length = 1_0
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''')
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='''pt''', )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 1_4), batch.input_ids.shape)
        self.assertEqual((2, 1_4), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=1_0, return_tensors='''pt''')
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 1_0)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''')
        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 2_5_0_0_0_1,
            }, )
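The integration tests above lean on MBart's `shift_tokens_right`, which rotates the last non-pad token (the target-language code) to the front of the sequence to build `decoder_input_ids`. A small illustration; aside from the pad/eos ids and the `ro_RO` code used in this file, the token ids are made up:

import torch

PAD, EOS = 1, 2
labels = torch.tensor([[884, 9019, 96, EOS, RO_CODE]])
# the language code moves to position 0, everything else shifts right by one
print(shift_tokens_right(labels, PAD))
# tensor([[250020,    884,   9019,     96,      2]])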
| 142 | 1 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self, u, v, w):
        self.dp[u][v] = w
    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3)) | 365 |
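A hand check of the two queries printed above; placed at the end of the `__main__` block these assertions hold, since the cheapest 1 -> 4 route is 1 -> 3 -> 4 with weight 5 + 6 = 11, and the cheapest 0 -> 3 route is 0 -> 2 -> 3 with weight 9 + 7 = 16:

assert graph.show_min(1, 4) == 11
assert graph.show_min(0, 3) == 16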
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"
    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("""pixel_values""", {0: """batch"""})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})])
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 34 | 0 |
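For context, a hedged usage sketch of the config above; the classes in this file correspond to `MobileNetV2Config`/`MobileNetV2Model` in upstream transformers, which the sketch assumes are available:

import torch
from transformers import MobileNetV2Config, MobileNetV2Model

# Build a small, randomly initialised backbone and run a dummy image through it.
config = MobileNetV2Config(depth_multiplier=0.35, image_size=96)
model = MobileNetV2Model(config)
pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # spatial dims are image_size / output_stride = 96 / 32 = 3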
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
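A hypothetical subclass showing how the two hooks are meant to work together: `register_subcommand` wires a subparser and stashes a factory, and `run` does the work. The `echo` command is invented purely for illustration:

class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is expected to be a subparsers action here (an assumption
        # of this sketch, matching how CLI frameworks typically call this hook).
        echo_parser = parser.add_parser("echo", help="Print a message and exit.")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self.message = message

    def run(self):
        print(self.message)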
| 128 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
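A minimal sketch of the sampling loop that `full_loop()` above drives; `toy_model` is a stand-in for a real denoiser and its output is meaningless, the point is only the scheduler API:

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)

def toy_model(x, t):
    # Toy residual predictor standing in for a trained model (illustration only).
    return 0.1 * x

for t in scheduler.timesteps:
    residual = toy_model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample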
| 128 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase = "true"
def _a ( _lowerCamelCase , _lowerCamelCase=82 , _lowerCamelCase=16 ) -> str:
"""simple docstring"""
set_seed(42 )
__snake_case : int = RegressionModel()
__snake_case : Any = deepcopy(_lowerCamelCase )
__snake_case : List[str] = RegressionDataset(length=_lowerCamelCase )
__snake_case : Any = DataLoader(_lowerCamelCase , batch_size=_lowerCamelCase )
model.to(accelerator.device )
__snake_case , __snake_case : List[Any] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
return model, ddp_model, dataloader
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
__snake_case : List[Any] = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(_lowerCamelCase ):
__snake_case : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
with accelerator.main_process_first():
__snake_case : Union[str, Any] = dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
__snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_lowerCamelCase ):
if use_longest:
return tokenizer.pad(_lowerCamelCase , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(_lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(_lowerCamelCase , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=16 )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Dict = Accelerator(dispatch_batches=_lowerCamelCase , split_batches=_lowerCamelCase )
__snake_case : List[str] = get_dataloader(_lowerCamelCase , not dispatch_batches )
__snake_case : int = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_lowerCamelCase )
__snake_case , __snake_case : Any = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Tuple = []
for batch in dataloader:
__snake_case , __snake_case : Dict = batch.values()
with torch.no_grad():
__snake_case : Optional[Any] = model(_lowerCamelCase )
__snake_case , __snake_case : int = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__snake_case , __snake_case : str = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCamelCase )
targs.append(_lowerCamelCase )
__snake_case , __snake_case : Optional[Any] = torch.cat(_lowerCamelCase ), torch.cat(_lowerCamelCase )
return logits, targs
def _a ( _lowerCamelCase , _lowerCamelCase=82 , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=16 ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : Optional[int] = get_basic_setup(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Optional[Any] = generate_predictions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
assert (
len(_lowerCamelCase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCamelCase )}'''
def _a ( _lowerCamelCase = False , _lowerCamelCase = False ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
__snake_case , __snake_case : Dict = get_mrpc_setup(_lowerCamelCase , _lowerCamelCase )
# First do baseline
__snake_case , __snake_case , __snake_case : Optional[Any] = setup["""no"""]
model.to(_lowerCamelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCamelCase )
with torch.inference_mode():
__snake_case : List[Any] = model(**_lowerCamelCase )
__snake_case : Tuple = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCamelCase , references=batch["""labels"""] )
__snake_case : Optional[int] = metric.compute()
# Then do distributed
__snake_case , __snake_case , __snake_case : Optional[int] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
__snake_case : Union[str, Any] = model(**_lowerCamelCase )
__snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
__snake_case : List[str] = batch["""labels"""]
__snake_case , __snake_case : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCamelCase , references=_lowerCamelCase )
__snake_case : Union[str, Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    """Entry point: run the metric tests across batching configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """For xla_spawn (TPUs)."""
    main()
if __name__ == "__main__":
main()
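# Illustrative sketch (added; not part of the original script): the core pattern that
# `gather_for_metrics` enables, with hypothetical names. After `prepare`, the final
# batch may be padded so every process gets equal work; `gather_for_metrics` strips
# those duplicates so exactly len(dataset) samples are accumulated:
#
#     accelerator = Accelerator()
#     model, dataloader = accelerator.prepare(model, dataloader)
#     for batch in dataloader:
#         preds = model(batch["x"]).argmax(dim=-1)
#         preds, refs = accelerator.gather_for_metrics((preds, batch["y"]))
#         metric.add_batch(predictions=preds, references=refs)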
| 13 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def _is_main(self) -> bool:
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type )
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group )
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )), None )
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ), dst=0, gather_list=gather_list, group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list ).numpy(), n_docs )
            ids, vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids, n_queries )
            scatter_vectors = self._chunk_tensor(vectors, n_queries )
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
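    # Flow summary (added for clarity): every worker gathers its query batch to rank 0;
    # rank 0 runs the single index lookup over the concatenated queries, chunks the
    # ids/embeddings back per worker, and scatter returns each worker exactly its own
    # [n_queries, n_docs] slice.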
| 13 | 1 |
ROMAN = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
        assert tok('''sam''').input_ids == [1384]
        src_text = '''I am a small frog.'''
        encoded = tok([src_text], padding=False, truncation=True)['''input_ids''']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text)['''input_ids''']
        encoded_dot = tok(src_text_dot)['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
| 198 | 0 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan of array[left:right], used once the window is small."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
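# Worked example (illustrative): for array = list(range(100)) and target = 42,
# both variants return index 42. Each ternary split discards two thirds of the
# window (O(log n) probes); once the window is narrower than `precision`, the
# plain linear scan above is cheaper than further splitting.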
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
    else:
        print("""Not found""")
| 5 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas, dtype=torch.float32 )
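# Worked sketch (added): the loop implements beta_t = 1 - alpha_bar(t2)/alpha_bar(t1),
# so the cumulative product of (1 - beta) tracks alpha_bar. For the "cosine"
# transform the first betas are tiny and they grow toward `max_beta` as t -> 1.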
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False, clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0, ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp' )
        else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None, ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas ) ), sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('mps' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t(self, sigma, log_sigmas):
        '''simple docstring'''
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        '''simple docstring'''
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
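    # Spacing sketch (added): sigmas are linear in sigma**(1/rho) space, i.e.
    # sigma_i = (s_max**(1/7) + ramp_i * (s_min**(1/7) - s_max**(1/7)))**7,
    # which concentrates sampling steps at small sigmas where fine detail is resolved.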
@property
    def state_in_first_order(self):
'''simple docstring'''
return self.dt is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True, ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ):
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t, schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 5 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
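    # Note (added; explanatory assumption): disabling ONNX Runtime's memory-pattern
    # optimization keeps the session from pre-allocating large arenas on top of the
    # 15GB gpu_mem_limit configured in `gpu_provider` above.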
    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-inpainting', subfolder='scheduler', revision='onnx' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting', revision='onnx', scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 96 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
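# Worked example (illustrative): for LLaMA-7B, n = 4096 gives int(8 * 4096 / 3) = 10922,
# which rounds up to the next multiple of 256 -> 11008, matching INTERMEDIATE_SIZE_MAP["7B"].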
def read_json(path):
    """simple docstring"""
    with open(path, 'r') as f:
        return json.load(f)
def write_json(text, path):
    """simple docstring"""
    with open(path, 'w') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """simple docstring"""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, 'tmp')
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, 'params.json'))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = n_heads // num_shards
    dim = params['dim']
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['n_kv_heads']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
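    # Note (added for clarity): Meta checkpoints store rotary Q/K rows as interleaved
    # (real, imag) pairs; the view/transpose above regroups them into the half-split
    # layout that the Transformers Llama rotary embedding expects.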
    print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''')
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, 'consolidated.00.pth'), map_location='cpu')
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f'''consolidated.{i:02d}.pth'''), map_location='cpu')
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {'weight_map': {}}
    for layer_i in range(n_layers):
        filename = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
                f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
                f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
                f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
                f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
                f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
                f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            state_dict[f'''model.layers.{layer_i}.self_attn.q_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim) )
            state_dict[f'''model.layers.{layer_i}.self_attn.k_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f'''model.layers.{layer_i}.self_attn.v_proj.weight'''] = torch.cat(
                [
                    loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f'''model.layers.{layer_i}.self_attn.o_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(num_shards)], dim=1)
            state_dict[f'''model.layers.{layer_i}.mlp.gate_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(num_shards)], dim=0)
            state_dict[f'''model.layers.{layer_i}.mlp.down_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(num_shards)], dim=1)
            state_dict[f'''model.layers.{layer_i}.mlp.up_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(num_shards)], dim=0)
        state_dict[f'''model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'''] = inv_freq
        for k, v in state_dict.items():
            index_dict['weight_map'][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
    if model_size == "7B":
        # Unsharded
        state_dict = {
            'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
            'model.norm.weight': loaded['norm.weight'],
            'lm_head.weight': loaded['output.weight'],
        }
    else:
        state_dict = {
            'model.norm.weight': loaded[0]['norm.weight'],
            'model.embed_tokens.weight': torch.cat(
                [loaded[i]['tok_embeddings.weight'] for i in range(num_shards)], dim=1),
            'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict['weight_map'][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict['metadata'] = {'total_size': param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, 'pytorch_model.bin.index.json'))
    ffn_dim_multiplier = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
    multiple_of = params['multiple_of'] if 'multiple_of' in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params['n_heads'], num_hidden_layers=params['n_layers'], rms_norm_eps=params['norm_eps'], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('Loading the checkpoint in a Llama model.')
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('Saving in the Transformers format.')
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """simple docstring"""
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 307 | 0 |
from collections import defaultdict
class AssignmentUsingBitmask:
    '''simple docstring'''

    def __init__(self, task_performed: list, total: int):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask: int, task_no: int):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed: list):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
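    # Scale sketch (added): with M persons the mask enumerates 2**M subsets and
    # task_no runs 1..N, so the memoised recursion touches at most 2**M * (N + 1)
    # states - e.g. 8 x 6 entries for the 3-person, 5-task demo below.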
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 194 |
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
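# Example (illustrative): simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))
# compares the arrays elementwise and returns 2/3.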
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 194 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """simple docstring"""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """hidden_sizes"""))
        self.parent.assertTrue(hasattr(config, """num_attention_heads"""))
        self.parent.assertTrue(hasattr(config, """num_encoder_blocks"""))
class SegformerModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
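    # Note (added): the tester deliberately builds a tiny SegFormer (64px inputs,
    # hidden sizes 16-128, depths [2, 2, 2, 2]) so the shape and gradient checks
    # in the test class below stay fast on CPU.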
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("""SegFormer does not use inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""")
def snake_case_ ( self):
pass
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def snake_case_ ( self):
pass
@slow
def snake_case_ ( self):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = SegformerModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""").to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""").to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
@slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""").to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 100 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n):
    '''simple docstring'''
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
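# Example (illustrative): list_truncated_nums(3797) returns
# [3797, 797, 379, 97, 37, 7, 3] - every right- and left-truncation of 3797.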
def validate(n):
    '''simple docstring'''
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count=11):
    '''simple docstring'''
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution():
    '''simple docstring'''
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(11)) = }')
| 165 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( _UpperCAmelCase ):
'''simple docstring'''
__snake_case = """blenderbot-small"""
__snake_case = ["""past_key_values"""]
__snake_case = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self: Optional[int] , a: Any=5_0265 , a: Tuple=512 , a: Tuple=8 , a: Optional[int]=2048 , a: Optional[int]=16 , a: Dict=8 , a: str=2048 , a: List[Any]=16 , a: Dict=0.0 , a: int=0.0 , a: Optional[Any]=True , a: Tuple=True , a: List[Any]="gelu" , a: Optional[int]=512 , a: Tuple=0.1 , a: List[str]=0.0 , a: int=0.0 , a: Dict=0.0_2 , a: Any=1 , a: Any=False , a: List[Any]=0 , a: Tuple=1 , a: int=2 , a: Tuple=2 , **a: List[Any] , ):
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : List[Any] = d_model
__lowerCamelCase : Optional[int] = encoder_ffn_dim
__lowerCamelCase : List[Any] = encoder_layers
__lowerCamelCase : List[str] = encoder_attention_heads
__lowerCamelCase : List[Any] = decoder_ffn_dim
__lowerCamelCase : Optional[Any] = decoder_layers
__lowerCamelCase : List[str] = decoder_attention_heads
__lowerCamelCase : int = dropout
__lowerCamelCase : Optional[int] = attention_dropout
__lowerCamelCase : int = activation_dropout
__lowerCamelCase : int = activation_function
__lowerCamelCase : Any = init_std
__lowerCamelCase : str = encoder_layerdrop
__lowerCamelCase : List[str] = decoder_layerdrop
__lowerCamelCase : Optional[int] = use_cache
__lowerCamelCase : Any = encoder_layers
__lowerCamelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
class A_ ( _UpperCAmelCase ):
'''simple docstring'''
@property
def _snake_case ( self: int ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase : Any = {0: """batch"""}
__lowerCamelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowerCamelCase : List[str] = {0: """batch""", 1: """decoder_sequence"""}
__lowerCamelCase : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCamelCase : List[str] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase : Optional[Any] = self.num_layers
for i in range(lowercase_ ):
__lowerCamelCase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
__lowerCamelCase : Any = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__lowerCamelCase : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _snake_case ( self: str ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : List[str] = super().outputs
else:
__lowerCamelCase : str = super(lowercase_ , self ).outputs
if self.use_past:
__lowerCamelCase : int = self.num_layers
for i in range(lowercase_ ):
__lowerCamelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
__lowerCamelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _snake_case ( self: int , a: PreTrainedTokenizer , a: int = -1 , a: int = -1 , a: bool = False , a: Optional[TensorType] = None , ):
__lowerCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
__lowerCamelCase : Dict = seq_length if not self.use_past else 1
__lowerCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__lowerCamelCase : Any = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowerCamelCase : Optional[int] = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCamelCase : Optional[int] = common_inputs["""input_ids"""].shape
__lowerCamelCase : str = common_inputs["""decoder_input_ids"""].shape[1]
__lowerCamelCase : Optional[int] = self.num_attention_heads
__lowerCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Dict = decoder_seq_length + 3
__lowerCamelCase : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCamelCase : Optional[int] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
__lowerCamelCase : Union[str, Any] = []
        # If the encoder and decoder depths differ, fill the shared layers with full 4-tuples first, then pad the deeper side
__lowerCamelCase : Dict = self.num_layers
__lowerCamelCase : Any = min(lowercase_ , lowercase_ )
__lowerCamelCase : Optional[Any] = max(lowercase_ , lowercase_ ) - min_num_layers
__lowerCamelCase : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
__lowerCamelCase : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
def _snake_case ( self: Any , a: PreTrainedTokenizer , a: int = -1 , a: int = -1 , a: bool = False , a: Optional[TensorType] = None , ):
__lowerCamelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCamelCase : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCamelCase : List[str] = seqlen + 2
__lowerCamelCase : List[str] = self.num_layers
__lowerCamelCase : Optional[Any] = self.num_attention_heads
__lowerCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Optional[Any] = common_inputs["""attention_mask"""].dtype
__lowerCamelCase : List[Any] = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
__lowerCamelCase : List[Any] = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def _snake_case ( self: List[str] , a: PreTrainedTokenizer , a: int = -1 , a: int = -1 , a: bool = False , a: Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCamelCase : Tuple = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCamelCase : Dict = tokenizer.num_special_tokens_to_add(lowercase_ )
__lowerCamelCase : Optional[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
__lowerCamelCase : List[str] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCamelCase : str = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def _snake_case ( self: int , a: PreTrainedTokenizer , a: int = -1 , a: int = -1 , a: bool = False , a: Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
elif self.task == "causal-lm":
__lowerCamelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
__lowerCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def _snake_case ( self: int , a: Union[str, Any] , a: List[Any] , a: Union[str, Any] , a: str ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : Tuple = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
__lowerCamelCase : List[Any] = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
| 371 |
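The second class above exists mainly to drive ONNX export: it declares the dynamic axes and synthesizes dummy inputs. A rough usage sketch, using the upstream names `BlenderbotSmallConfig` / `BlenderbotSmallOnnxConfig` for the two classes defined here; treat the exact import paths as version-dependent assumptions:

from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.models.blenderbot_small import BlenderbotSmallOnnxConfig  # assumed export location
from transformers.utils import TensorType

config = BlenderbotSmallConfig.from_pretrained("facebook/blenderbot_small-90M")
onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# -1 for batch/sequence makes the config fall back to its fixed dummy sizes.
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=-1, seq_length=-1, framework=TensorType.PYTORCH
)
print(sorted(dummy_inputs))  # input_ids, attention_mask, decoder_input_ids, decoder_attention_mask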
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a parquet row-group batch size based on the dataset's feature types."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset


class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset's Arrow table to a parquet file handle, batch by batch."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 194 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : Any,_A : int,_A : Tuple=7,_A : Tuple=3,_A : int=30,_A : str=400,_A : Any=True,_A : Optional[Any]=None,_A : Optional[Any]=True,_A : Dict=[0.5, 0.5, 0.5],_A : Dict=[0.5, 0.5, 0.5],_A : str=True,_A : Optional[int]=1 / 255,_A : Dict=True,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = min_resolution
SCREAMING_SNAKE_CASE_ : List[str] = max_resolution
SCREAMING_SNAKE_CASE_ : List[str] = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : Dict = do_normalize
SCREAMING_SNAKE_CASE_ : Any = image_mean
SCREAMING_SNAKE_CASE_ : Any = image_std
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE_ : str = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_pad
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCamelCase ( self : Union[str, Any],_A : List[str],_A : Tuple=False ):
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_inputs[0]
if isinstance(_A,Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : Tuple = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE_ : int = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE_ : int = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ : int = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : Tuple = max(_A,key=lambda _A : item[0] )[0]
SCREAMING_SNAKE_CASE_ : Dict = max(_A,key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a__ ( A__ , unittest.TestCase ):
A = DetaImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = DetaImageProcessingTester(self )
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A,"image_mean" ) )
self.assertTrue(hasattr(_A,"image_std" ) )
self.assertTrue(hasattr(_A,"do_normalize" ) )
self.assertTrue(hasattr(_A,"do_resize" ) )
self.assertTrue(hasattr(_A,"do_rescale" ) )
self.assertTrue(hasattr(_A,"do_pad" ) )
self.assertTrue(hasattr(_A,"size" ) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad,_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(_A,batched=_A )
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Any = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Any = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(_A,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(_A,batched=_A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ : int = image_processing(_A,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_A,batched=_A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt","r" ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : Dict = image_processing(images=_A,annotations=_A,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3],_A,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : int = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"],_A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0],_A,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"],_A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"],_A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"],_A ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"],_A ) )
# verify size
SCREAMING_SNAKE_CASE_ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"],_A ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt","r" ) as f:
SCREAMING_SNAKE_CASE_ : str = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE_ : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE_ : Any = image_processing(images=_A,annotations=_A,masks_path=_A,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3],_A,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"],_A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape,_A )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0],_A,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"],_A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"],_A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"],_A ) )
# verify masks
SCREAMING_SNAKE_CASE_ : str = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(),_A )
# verify orig_size
SCREAMING_SNAKE_CASE_ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"],_A ) )
# verify size
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"],_A ) )
| 18 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # An even-sized subtree can be detached by removing the edge above it.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Kick off the traversal from the root (node 1)."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 18 | 1 |
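Tracing the hard-coded input makes the `ret % 2 == 0` test concrete. Rooted at 1, the subtree sizes are: node 3 heads 2 nodes, node 6 heads 4, node 1 heads all 10, and every other node heads an odd subtree. Even sizes mark edges that can be removed while keeping all components even; the root always lands in `cuts` when the total node count is even, hence the final `len(cuts) - 1`:

# cuts ends up as [3, 6, 1] for the tree above, so
# len(cuts) - 1 == 2: edges (1, 3) and (1, 6) can be removed.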
"""Prim's algorithm: compute a minimum spanning tree of a weighted, connected graph."""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with an id, a key used by Prim's algorithm, and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum key."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap; yields MST edges lazily."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 |
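A small end-to-end run of the module above on a five-vertex graph (`connect` takes 1-based positions into the list, while the returned pairs are the vertex ids shifted by one):

G = [Vertex(n) for n in range(5)]
connect(G, 1, 2, 15)
connect(G, 1, 3, 12)
connect(G, 2, 4, 13)
connect(G, 2, 5, 5)
connect(G, 3, 2, 6)
connect(G, 3, 4, 6)

print(prim(G, G[0]))             # [(2, 3), (3, 1), (4, 3), (5, 2)]
print(list(prim_heap(G, G[0])))  # same MST via the heap-based variant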
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : str = (32, 32)
UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : List[str] = (32, 32)
UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (4, 32, 32)
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
UpperCAmelCase__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1e-3 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : str = noise.to(_A )
UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any , _A : str=(32, 32) ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : List[str] = self.dummy_input
UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
UpperCAmelCase__ : Optional[Any] = noise
UpperCAmelCase__ : Any = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Dict = (256, 256)
UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : int = (32, 32)
UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : int = model(_A , _A ).sample
UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
| 299 | 1 |
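Outside the test suite, the same call pattern — a noisy sample plus a timestep in, a predicted sample out — is one denoising step. A minimal sketch using the public name `UNet2DModel` (spelled `UNetaDModel` in the snippet above) and the same dummy checkpoint:

import torch
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()

noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
timestep = torch.tensor([10])

with torch.no_grad():
    prediction = model(noise, timestep).sample

print(prediction.shape)  # same shape as the input sample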
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_perceiver'''] = ['''PerceiverFeatureExtractor''']
    _import_structure['''image_processing_perceiver'''] = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_perceiver'''] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 |
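The structure above is the standard lazy-import pattern: import nothing heavy at module load, then resolve names on first attribute access. A stripped-down illustration of the idea (this is a sketch, not the `transformers` implementation):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the next lookup skips __getattr__
        return value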
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class UpperCamelCase ( snake_case_ ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None:
_a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_a : Optional[Any] = 3
_a : Tuple = do_lower_case
_a : Tuple = remove_space
_a : Tuple = keep_accents
_a : Tuple = vocab_file
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_a : int = jieba
_a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowercase ( self : Optional[Any] ) -> Any:
return len(self.sp_model )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
_a : Tuple = self.__dict__.copy()
_a : Tuple = None
return state
def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
if self.remove_space:
_a : Optional[int] = """ """.join(inputs.strip().split() )
else:
_a : List[Any] = inputs
_a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ )
_a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
_a : Union[str, Any] = outputs.lower()
return outputs
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
_a : str = self.preprocess_text(UpperCAmelCase__ )
_a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
_a : Union[str, Any] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Dict = cur_pieces[1:]
else:
_a : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int:
return self.sp_model.PieceToId(UpperCAmelCase__ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict:
_a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip()
return out_string
def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Optional[Any] = [self.sep_token_id]
_a : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Any = [self.sep_token_id]
_a : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Union[str, Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , """wb""" ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]:
_a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 294 | 0 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
} | 33 |
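These constants feed the documentation tooling: the install cell is prepended when doc pages are converted to notebooks, and the placeholder map substitutes fake class names so templated snippets survive the code formatter. Roughly how such a map gets applied (a sketch, not the doc-builder source):

content = "Use {processor_class} together with {model_class}."
for placeholder, fake in black_avoid_patterns.items():
    content = content.replace(placeholder, fake)
print(content)  # Use FakeProcessorClass together with FakeModelClass.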
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution to the matrix chain multiplication problem."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the parenthesization encoded in `optimal_solution`."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main() | 33 | 1 |
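For the hard-coded dimensions the chain is A1 (30x35) through A6 (20x25), the classic CLRS example: the minimum is 15125 scalar multiplications with parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ). A quick check against the table the function returns:

matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125  # minimum number of scalar multiplications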
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials, each succeeding with probability `prob`.
    """
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("""the function is defined for non-negative integers""")
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.7_5))
| 147 |
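The printed example can be checked by hand: C(4, 2) = 6, 0.75 squared is 0.5625 and 0.25 squared is 0.0625, so the probability is 6 x 0.5625 x 0.0625 = 0.2109375. All factors are exactly representable in binary floating point, so a plain equality assert is safe here:

assert binomial_distribution(2, 4, 0.75) == 6 * 0.75**2 * 0.25**2  # 0.2109375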
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
a : Dict = logging.get_logger(__name__)
a : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
a : Tuple = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
a : Optional[int] = {
'RUCAIBox/mvp': 1_024,
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['''input_ids''', '''attention_mask''']
A = MvpTokenizer
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCAmelCase_: str = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop("""type""" ) )
UpperCAmelCase_: Dict = add_prefix_space
UpperCAmelCase_: List[str] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_: Optional[int] = """post_processor"""
UpperCAmelCase_: Any = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCAmelCase_: Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_: Optional[int] = tuple(state["""sep"""] )
if "cls" in state:
UpperCAmelCase_: int = tuple(state["""cls"""] )
UpperCAmelCase_: Any = False
if state.get("""add_prefix_space""", SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCAmelCase_: Tuple = add_prefix_space
UpperCAmelCase_: Union[str, Any] = True
if state.get("""trim_offsets""", SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCAmelCase_: Optional[Any] = trim_offsets
UpperCAmelCase_: Dict = True
if changes_to_apply:
UpperCAmelCase_: Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop("""type""" ) )
UpperCAmelCase_: Dict = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def __snake_case (self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: List[Any] = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCAmelCase_: str = value
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCAmelCase_: int = kwargs.get("""is_split_into_words""", SCREAMING_SNAKE_CASE_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCAmelCase_: Union[str, Any] = kwargs.get("""is_split_into_words""", SCREAMING_SNAKE_CASE_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCAmelCase_: Any = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> int:
UpperCAmelCase_: Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: Dict = [self.sep_token_id]
UpperCAmelCase_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 147 | 1 |
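A short usage sketch for the class above, whose upstream name is `MvpTokenizerFast`, assuming the `RUCAIBox/mvp` checkpoint is reachable; a single sequence is wrapped as `<s> ... </s>`, matching `build_inputs_with_special_tokens`:

from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
encoded = tokenizer("Summarize: the cat sat on the mat.")
tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
assert tokens[0] == "<s>" and tokens[-1] == "</s>"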
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
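
# Illustrative usage (assumes the package installs a `transformers-cli`
# console entry point; the commands correspond to the subcommands
# registered above):
#   transformers-cli env
#   transformers-cli download bert-base-uncased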
| 363 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1; otherwise output 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
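

# Illustration (hypothetical helpers, not in the original file): NAND is
# functionally complete, so the other basic gates can be derived from it.
def not_gate_from_nand(a: int) -> int:
    return nand_gate(a, a)


def and_gate_from_nand(a: int, b: int) -> int:
    return nand_gate(nand_gate(a, b), nand_gate(a, b))


def or_gate_from_nand(a: int, b: int) -> int:
    return nand_gate(nand_gate(a, a), nand_gate(b, b))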
| 67 | 0 |
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
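
# Illustrative invocation (assumed; distributed test scripts like this are
# normally started with the accelerate launcher):
#   accelerate launch --num_processes 2 test_metrics.py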
| 13 |
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
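
# Illustrative invocations (assumed file name; see the accelerate examples
# README referenced above):
#   python nlp_example.py                  # single CPU or GPU
#   accelerate launch nlp_example.py       # after running `accelerate config`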
| 13 | 1 |
def solution(n: int = 100) -> int:
    """Count the distinct values of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    max_limit = n + 1  # maximum limit (range() excludes its upper bound)
    for a in range(2, max_limit):
        for b in range(2, max_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
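

# Illustration (hypothetical check, not in the original file): for n = 5 the
# distinct values of a**b with 2 <= a, b <= 5 number exactly 15
# (4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125).
def _check_small_case() -> None:
    assert solution(5) == 15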
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip()))) | 171 |
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171 | 1 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over a slice of the list; return -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
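

# Illustration (hypothetical check, assuming the functions above): both search
# variants agree on a small, already-sorted list.
def _check_ternary_search() -> None:
    data = [1, 3, 5, 7, 9, 11]
    assert ite_ternary_search(data, 7) == 3
    assert rec_ternary_search(0, len(data) - 1, data, 7) == 3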
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = input('''Enter numbers separated by comma:\n''').strip()
UpperCAmelCase__ = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
UpperCAmelCase__ = int(input('''Enter the number to be found in the list:\n''').strip())
UpperCAmelCase__ = ite_ternary_search(collection, target)
UpperCAmelCase__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 5 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """A dual-stream wrapper that mixes the outputs of two `Transformer2DModel` blocks."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
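
# Context note (assumption, not stated in this file): the default
# condition_lengths of [77, 257] match a dual text/image conditioning setup
# in which a CLIP text encoder contributes 77 tokens and a CLIP image encoder
# contributes 257 embeddings, as used by Versatile Diffusion.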
| 5 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( ) -> int:
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]:
lowercase__: Optional[int] = 1
lowercase__: List[Any] = 2
while i * i <= n:
lowercase__: Any = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return next(i for i in triangle_number_generator() if count_divisors(__UpperCAmelCase ) > 5_0_0 )
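

# Illustration (hypothetical check, not in the original file): 28 = 2**2 * 7
# has (2 + 1) * (1 + 1) = 6 divisors, and is the first triangle number with
# more than five of them.
def _check_count_divisors() -> None:
    assert count_divisors(28) == 6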
if __name__ == "__main__":
print(solution())
| 2 | """simple docstring"""
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 2 | 1 |
"""simple docstring"""
__A : str = tuple[float, float, float]
__A : str = tuple[float, float, float]
def lowercase ( __snake_case : Pointad , __snake_case : Pointad ):
lowercase_ : List[Any] = end_pointa[0] - end_pointa[0]
lowercase_ : List[Any] = end_pointa[1] - end_pointa[1]
lowercase_ : List[str] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def lowercase ( __snake_case : Vectorad , __snake_case : Vectorad ):
lowercase_ : Any = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowercase_ : Optional[Any] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowercase_ : int = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def lowercase ( __snake_case : Vectorad , __snake_case : int ):
return tuple(round(__snake_case , __snake_case ) for x in vector ) == (0, 0, 0)
def lowercase ( __snake_case : Pointad , __snake_case : Pointad , __snake_case : Pointad , __snake_case : int = 1_0 ):
lowercase_ : Any = create_vector(__snake_case , __snake_case )
lowercase_ : Any = create_vector(__snake_case , __snake_case )
return is_zero_vector(get_ad_vectors_cross(__snake_case , __snake_case ) , __snake_case )
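

# Illustration (hypothetical check, assuming the helpers above): three points
# on the line y = x, z = 0 are collinear, so the cross product vanishes.
def _check_are_collinear() -> None:
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (2.0, 2.0, 0.0))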
| 33 |
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
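
# Illustrative invocation (paths assumed):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch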
| 192 | 0 |
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
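
# Illustrative invocation (assumed transformers test layout):
#   python -m pytest tests/models/cvt/test_modeling_cvt.py -k "test_model"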
| 358 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
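
# Illustrative wiring (assuming the classes above):
#   config = GPTJConfig()
#   onnx_config = GPTJOnnxConfig(config, task="default")
#   onnx_config.inputs               # OrderedDict of dynamic-axis specs
#   onnx_config.default_onnx_opset   # 13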
| 236 | 0 |
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_missing_keys(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 158 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A = NewType('''DataClass''', Any)
A = NewType('''DataClassType''', Any)
def __A ( a_ :List[str]) -> Tuple:
if isinstance(a_ , a_):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""")
def __A ( a_ :list) -> Callable[[str], Any]:
__a : Any = {str(a_): choice for choice in choices}
return lambda a_: str_to_choice.get(a_ , a_)
def __A ( *,
a_ :Union[str, List[str]] = None , a_ :str = None , a_ :Any = dataclasses.MISSING , a_ :Callable[[], Any] = dataclasses.MISSING , a_ :dict = None , **a_ :str , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__a : List[Any] = {}
if aliases is not None:
__a : Optional[Any] = aliases
if help is not None:
__a : int = help
return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 42
def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
__a : str = ArgumentDefaultsHelpFormatter
super().__init__(**_UpperCAmelCase )
if dataclasses.is_dataclass(_UpperCAmelCase ):
__a : int = [dataclass_types]
__a : Optional[Any] = list(_UpperCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_UpperCAmelCase )
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = f"""--{field.name}"""
__a : Optional[int] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _UpperCAmelCase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
__a : Dict = kwargs.pop('''aliases''' , [] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = [aliases]
__a : Tuple = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(_UpperCAmelCase , '''UnionType''' ) and isinstance(_UpperCAmelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_UpperCAmelCase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(_UpperCAmelCase ) not in field.type.__args__:
# filter `str` in Union
__a : List[str] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__a : List[str] = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__a : List[str] = (
field.type.__args__[0] if isinstance(_UpperCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
__a : Optional[Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__a : Optional[int] = {}
if origin_type is Literal or (isinstance(field.type , _UpperCAmelCase ) and issubclass(field.type , _UpperCAmelCase )):
if origin_type is Literal:
__a : int = field.type.__args__
else:
__a : List[str] = [x.value for x in field.type]
__a : Any = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__a : Tuple = field.default
else:
__a : Optional[int] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__a : Any = copy(_UpperCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
__a : List[str] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__a : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__a : List[Any] = default
# This tells argparse we accept 0 or 1 value after --field_name
__a : Union[str, Any] = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__a : List[Any] = True
elif isclass(_UpperCAmelCase ) and issubclass(_UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = field.type.__args__[0]
__a : Optional[int] = '''+'''
if field.default_factory is not dataclasses.MISSING:
__a : Union[str, Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
__a : List[Any] = True
else:
__a : int = field.type
if field.default is not dataclasses.MISSING:
__a : Optional[Any] = field.default
elif field.default_factory is not dataclasses.MISSING:
__a : Optional[int] = field.default_factory()
else:
__a : Union[str, Any] = True
parser.add_argument(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__a : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , '''_argument_group_name''' ):
__a : Any = self.add_argument_group(dtype._argument_group_name )
else:
__a : Optional[Any] = self
try:
__a : Dict[str, type] = get_type_hints(_UpperCAmelCase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing the line `from __future__ import annotations`, which opts into Postponed '''
                '''Evaluation of Annotations (PEP 563).''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_UpperCAmelCase ):
__a : Union[str, Any] = '''.'''.join(map(_UpperCAmelCase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''the line `from __future__ import annotations`, which opts into union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions lower than 3.10, use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(_UpperCAmelCase ):
if not field.init:
continue
__a : str = type_hints[field.name]
self._parse_dataclass_field(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__a : int = []
if args_filename:
args_files.append(Path(_UpperCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__a : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(_UpperCAmelCase , type=_UpperCAmelCase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__a , __a : List[Any] = args_file_parser.parse_known_args(args=_UpperCAmelCase )
__a : Union[str, Any] = vars(_UpperCAmelCase ).get(args_file_flag.lstrip('''-''' ) , _UpperCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_UpperCAmelCase ) for p in cmd_args_file_paths] )
__a : Union[str, Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__a : Dict = file_args + args if args is not None else file_args + sys.argv[1:]
__a , __a : str = self.parse_known_args(args=_UpperCAmelCase )
__a : Optional[int] = []
for dtype in self.dataclass_types:
__a : Optional[int] = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
__a : List[str] = {k: v for k, v in vars(_UpperCAmelCase ).items() if k in keys}
for k in keys:
delattr(_UpperCAmelCase , _UpperCAmelCase )
__a : int = dtype(**_UpperCAmelCase )
outputs.append(_UpperCAmelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_UpperCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
__a : Tuple = set(args.keys() )
__a : List[str] = []
for dtype in self.dataclass_types:
__a : Dict = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
__a : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__a : Tuple = dtype(**_UpperCAmelCase )
outputs.append(_UpperCAmelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_UpperCAmelCase )}""" )
return tuple(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
with open(Path(_UpperCAmelCase ) , encoding='''utf-8''' ) as open_json_file:
__a : int = json.loads(open_json_file.read() )
__a : str = self.parse_dict(_UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
__a : Tuple = self.parse_dict(yaml.safe_load(Path(_UpperCAmelCase ).read_text() ) , allow_extra_keys=_UpperCAmelCase )
return tuple(_UpperCAmelCase ) | 160 | 0 |
"""simple docstring"""
def solution(n = 1_00 ) -> int:
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
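# A closed-form sketch, equivalent to the loop above, using
# sum(i) = n*(n+1)/2 and sum(i**2) = n*(n+1)*(2*n+1)/6:
#     (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6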
if __name__ == "__main__":
print(F"{solution() = }")
| 313 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
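# A note on the technique (an incremental sieve): factor_map maps each upcoming
# composite to one of its prime factors. A number missing from the map when
# reached is prime, and its square is queued as the first composite it yields;
# otherwise the entry is slid forward to the next multiple not yet claimed.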
def solution(limit = 1E10 ) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
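# Worked reasoning (Project Euler 123): for the n-th prime p, the remainder of
# (p - 1)**n + (p + 1)**n modulo p**2 is 2 when n is even and 2*n*p when n is
# odd, so only odd n are tested and the check reduces to `2 * prime * n > limit`.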
if __name__ == "__main__":
print(solution())
| 313 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A__ ( A__ ):
A__ = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
A__ = 'CIDAS/clipseg-rd64-refined'
A__ = 'image_segmenter'
A__ = CLIPSegForImageSegmentation
A__ = ['image', 'text']
A__ = ['image']
def __init__( self : Any , *_a : Dict , **_a : str ) -> Any:
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*_a , **_a )
def A ( self : int , _a : "Image" , _a : str ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=_a , return_tensors='pt' )
def A ( self : Dict , _a : Dict ) -> str:
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE =self.model(**_a ).logits
return logits
def A ( self : Any , _a : str ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =outputs.cpu().detach().numpy()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =1
return Image.fromarray((array * 255).astype(np.uinta ) )
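    # Hypothetical usage sketch (class and image path are illustrative; in
    # transformers the tool is a callable running encode -> forward -> decode):
    #     tool = A__()
    #     mask = tool(image=Image.open("cat.png"), label="cat")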
| 47 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase : List[Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase : Any = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase : Optional[Any] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase : Optional[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def A ( self : Tuple ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def A ( self : Union[str, Any] , _a : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def A ( self : int , _a : Tuple , _a : List[str] , _a : List[str]=0.9 , _a : Dict=3 , _a : Optional[int]=0.5 ) -> Optional[int]:
'''simple docstring'''
if NLTK_VERSION >= version.Version('3.6.5' ):
_SCREAMING_SNAKE_CASE =[
meteor_score.single_meteor_score(
word_tokenize(_a ) , word_tokenize(_a ) , alpha=_a , beta=_a , gamma=_a )
for ref, pred in zip(_a , _a )
]
else:
_SCREAMING_SNAKE_CASE =[
meteor_score.single_meteor_score(_a , _a , alpha=_a , beta=_a , gamma=_a )
for ref, pred in zip(_a , _a )
]
return {"meteor": np.mean(_a )}
| 47 | 1 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle( data ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
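# Note: the variant above swaps two independently chosen indices per pass, which
# is simpler than the classic Fisher-Yates shuffle but does not sample
# permutations uniformly. A sketch of the classic in-place form, for comparison:
#     for i in range(len(data) - 1, 0, -1):
#         j = random.randint(0, i)
#         data[i], data[j] = data[j], data[i]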
if __name__ == "__main__":
_a : Optional[Any] = [0, 1, 2, 3, 4, 5, 6, 7]
_a : Tuple = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 46 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""decision_transformer"""
a : List[Any] =["""past_key_values"""]
a : Dict ={
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=17,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=1_28,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=10_24,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="relu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = state_dim
__lowerCAmelCase = act_dim
__lowerCAmelCase = hidden_size
__lowerCAmelCase = max_ep_len
__lowerCAmelCase = action_tanh
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scale_attn_weights
__lowerCAmelCase = use_cache
__lowerCAmelCase = scale_attn_by_inverse_layer_idx
__lowerCAmelCase = reorder_and_upcast_attn
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
| 46 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__A : List[str] = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Any = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : str , **A : List[Any] ) -> Union[str, Any]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase_ : List[str] = deprecated_arg[3:]
setattr(self , A , not kwargs.pop(A ) )
logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase_ : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript )
lowercase_ : Tuple = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
lowercase_ : Any = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**A )
SCREAMING_SNAKE_CASE_ : bool = field(default=_A , metadata={"help": "Trace the models using torchscript"} )
SCREAMING_SNAKE_CASE_ : bool = field(default=_A , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
SCREAMING_SNAKE_CASE_ : str = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def A ( self : Optional[Any] ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
lowercase_ : List[Any] = torch.device('''cpu''' )
lowercase_ : Optional[int] = 0
elif is_torch_tpu_available():
lowercase_ : Optional[Any] = xm.xla_device()
lowercase_ : Union[str, Any] = 0
else:
lowercase_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowercase_ : str = torch.cuda.device_count()
return device, n_gpu
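    # `_setup_devices` is a cached_property: device detection runs once, and the
    # device/n_gpu properties below reuse the cached (device, n_gpu) pair.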
@property
def A ( self : Any ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def A ( self : str ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def A ( self : int ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def A ( self : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def A ( self : List[str] ) -> Optional[Any]:
return self.n_gpu > 0
| 33 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A : List[str] = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''MobileNetV2FeatureExtractor''']
__A : Optional[int] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
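    # In the usual lazy-import pattern, sys.modules[__name__] is replaced with a
    # _LazyModule so the heavy torch/vision imports above only run when one of
    # the exported names is first accessed.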
| 33 | 1 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
SCREAMING_SNAKE_CASE__ = 'base_with_context'
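# The loaders below copy T5X/Flax checkpoint weights into the torch modules; the
# repeated `.T` transposes are needed because Flax dense kernels are stored as
# (in_features, out_features) while torch nn.Linear expects (out_features, in_features).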
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Any:
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase = weights[F"layers_{lyr_num}"]
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
UpperCamelCase = ly_weight["""attention"""]
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase = weights[F"layers_{lyr_num}"]
UpperCamelCase = ly_weight["""attention"""]
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCamelCase )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCamelCase = weights[F"layers_{lyr_num}"]
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
UpperCamelCase = ly_weight["""self_attention"""]
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCamelCase = ly_weight["""MultiHeadDotProductAttention_0"""]
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def lowercase__ ( __UpperCamelCase )-> Union[str, Any]:
UpperCamelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
UpperCamelCase = jnp.tree_util.tree_map(onp.array , __UpperCamelCase )
UpperCamelCase = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
UpperCamelCase = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
UpperCamelCase = inference.parse_training_gin_file(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase = inference.InferenceModel(args.checkpoint_path , __UpperCamelCase )
UpperCamelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
UpperCamelCase = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
UpperCamelCase = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
UpperCamelCase = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
UpperCamelCase = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , __UpperCamelCase )
UpperCamelCase = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , __UpperCamelCase )
UpperCamelCase = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , __UpperCamelCase )
UpperCamelCase = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
UpperCamelCase = SpectrogramDiffusionPipeline(
notes_encoder=__UpperCamelCase , continuous_encoder=__UpperCamelCase , decoder=__UpperCamelCase , scheduler=__UpperCamelCase , melgan=__UpperCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 183 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Any:
"""simple docstring"""
UpperCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_pad
def A__ ( self ) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase ,UpperCamelCase = image.size
else:
UpperCamelCase ,UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = self.size["""shortest_edge"""]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase ,UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
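    # The helper above mirrors the processor's shortest-edge resize rule so the
    # tests can predict the post-resize (height, width) of single and batched inputs.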
@require_torch
@require_vision
class a_ ( lowerCamelCase , unittest.TestCase ):
lowercase = ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""image_id""": 39769, """annotations""": target}
# encode them
UpperCamelCase = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
@slow
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
UpperCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
| 183 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float], x: float ):
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly: Sequence[float], x: float ):
    '''simple docstring'''
    result: float = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
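# Horner's rule evaluates c0 + c1*x + ... + cn*x**n as
# (...(cn*x + c(n-1))*x + ...)*x + c0, using n multiplications instead of
# recomputing each power. For poly = (0.0, 0.0, 5.0, 9.3, 7.0) at x = 10.0,
# both functions return 79800.0.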
if __name__ == "__main__":
a_ = (0.0, 0.0, 5.0, 9.3, 7.0)
a_ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 152 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
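        # The check above confirms that a cached two-step decode (prefix pass,
        # then a one-token pass reusing past_key_values) matches a single full
        # forward pass to within 1e-3.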
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __snake_case ( __lowerCAmelCase ):
a__ = """yolos"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=[5_12, 8_64] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=1_00 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(**lowercase)
a__: int = hidden_size
a__: Optional[Any] = num_hidden_layers
a__: str = num_attention_heads
a__: List[str] = intermediate_size
a__: Optional[Any] = hidden_act
a__: str = hidden_dropout_prob
a__: Union[str, Any] = attention_probs_dropout_prob
a__: Optional[int] = initializer_range
a__: int = layer_norm_eps
a__: List[str] = image_size
a__: Optional[int] = patch_size
a__: Optional[int] = num_channels
a__: List[str] = qkv_bias
a__: List[Any] = num_detection_tokens
a__: Dict = use_mid_position_embeddings
a__: Optional[Any] = auxiliary_loss
# Hungarian matcher
a__: Dict = class_cost
a__: str = bbox_cost
a__: List[Any] = giou_cost
# Loss coefficients
a__: Union[str, Any] = bbox_loss_coefficient
a__: Tuple = giou_loss_coefficient
a__: Any = eos_coefficient
class __snake_case ( __lowerCAmelCase ):
a__ = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCamelCase_ ( self) -> float:
'''simple docstring'''
return 1e-4
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return 12
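# A minimal usage sketch of the classes above (values illustrative):
#
#     config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#     onnx_config = YolosOnnxConfig(config)
#     assert config.hidden_size == 768           # default
#     assert onnx_config.default_onnx_opset == 12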
| 203 |
"""A platform-independent file lock, vendored from py-filelock 3.0.12."""

import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Wraps the lock returned by ``acquire()`` so a ``with`` statement releases it."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value, in seconds."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
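# A minimal usage sketch of the API above. The lock path is illustrative;
# `FileLock` resolves to the platform-appropriate class chosen right above.
if __name__ == "__main__":
    demo_lock = FileLock("demo_resource.lock", timeout=10)
    with demo_lock:  # blocks up to 10 s, then raises Timeout
        print("lock held:", demo_lock.is_locked)
    # The lock is reentrant: nested `with demo_lock:` blocks release the
    # underlying file only when the outermost block exits.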
| 203 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__,  # the encoding dict defined above
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 50 |
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
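# Note: the layer above shares (does not copy) the embedding matrix — after the
# `.data` assignment its weight is the (vocab_size, emb_size) embedding table,
# so it maps hidden states of size emb_size to vocabulary logits. This is the
# standard weight-tying trick for output projection heads.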
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
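# Sketch of the result for a hypothetical fairseq dict file whose first lines
# are "the 1234" and "of 999": the four specials keep ids 0-3, then
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "of": 5, ...}.
# (Illustrative values only; only the token order matters, not the counts.)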
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
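# Example invocation (script name and paths are placeholders, not real files):
#
#     python convert_wav2vec2_seq2seq.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./wav2vec2-2-speech2text2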
| 300 | 0 |
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 284 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return the list of prime numbers up to ``num`` using the sieve of
    Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
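# The sieve runs in O(n log log n) time and O(n) memory; starting the inner
# loop at p * p is safe because every smaller multiple of p has a smaller
# prime factor and was already crossed out in an earlier pass.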
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 284 | 1 |
"""Project Euler 12: find the first triangle number with more than 500 divisors."""


def triangle_number_generator():
    """Generate triangle numbers: n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of ``n`` from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
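# Worked example of count_divisors: 28 = 2**2 * 7, so the divisor count is
# (2 + 1) * (1 + 1) = 6 — namely {1, 2, 4, 7, 14, 28} — which makes 28 the
# first triangle number with more than five divisors.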
| 2 |
"""Count the islands (8-connected components of 1s) in a boolean matrix."""


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it is inside the grid, unvisited, and land.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
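if __name__ == "__main__":
    # A small smoke test (grid values illustrative). Cells connect through all
    # eight neighbours, so the two 1-regions below form exactly two islands.
    demo_grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, demo_grid).count_islands())  # -> 2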
| 2 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
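# The integration tests above download real checkpoints and are gated behind
# the transformers slow-test flag; a typical invocation (test path assumed) is:
#
#     RUN_SLOW=1 pytest tests/models/autoformer/test_modeling_autoformer.py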
| 362 |
"""simple docstring"""
def snake_case_ ( A_ : list ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = len(A_ )
for i in range(1, A_ ):
_lowerCamelCase : Tuple = collection[i]
_lowerCamelCase : Dict = 0
_lowerCamelCase : Any = i - 1
while low <= high:
_lowerCamelCase : Optional[int] = (low + high) // 2
if val < collection[mid]:
_lowerCamelCase : List[str] = mid - 1
else:
_lowerCamelCase : Dict = mid + 1
for j in range(A_, A_, -1 ):
_lowerCamelCase : Optional[int] = collection[j - 1]
_lowerCamelCase : Tuple = val
return collection
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
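# Example: binary_insertion_sort([5, 2, 4, 6, 1, 3]) returns [1, 2, 3, 4, 5, 6].
# Binary search cuts the comparison count to O(n log n), but the element
# shifting keeps the overall worst case at O(n^2).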
| 175 | 0 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
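# Example invocation (script and file names are placeholders):
#
#     python convert_original_controlnet_to_diffusers.py \
#         --checkpoint_path control_sd15_canny.pth \
#         --original_config_file cldm_v15.yaml \
#         --dump_path ./controlnet-canny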
| 41 |
"""TensorFlow RegNet model."""

from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase : Any = hidden_states + (hidden_state,)
__UpperCamelCase : Any = stage_module(_UpperCAmelCase )
if output_hidden_states:
__UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
@keras_serializable
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
A = RegNetConfig
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Optional[int] = config
__UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
__UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
__UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
@unpack_inputs
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : str = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : List[str] = encoder_outputs[0]
__UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
        # Change to NCHW output format to have uniformity in the modules
__UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
__UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = RegNetConfig
A = "regnet"
A = "pixel_values"
@property
def a_ (self ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Tuple = self.regnet(
pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = config.num_labels
__UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
# classification head
__UpperCamelCase : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Dict = self.regnet(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
__UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
__UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
if not return_dict:
__UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
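# Hedged usage sketch (added for illustration): this file mirrors the Hugging Face
# TFRegNet port, so with the real class names inference looks roughly like the
# following; "facebook/regnet-y-040" is an example checkpoint and `image` a PIL image.
#
#   import tensorflow as tf
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits
#   label = model.config.id2label[int(tf.argmax(logits, axis=-1)[0])]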
| 298 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( __lowercase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = DDIMPipeline
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase__ = False
def lowercase__ ( self : Any )->List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'''unet''': unet, '''scheduler''': scheduler}
return components
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int=0 )->List[Any]:
if str(_a ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_a )
else:
_UpperCAmelCase = torch.Generator(device=_a ).manual_seed(_a )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = '''cpu'''
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase = self.get_dummy_inputs(_a )
_UpperCAmelCase = pipe(**_a ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 3_2, 3_2, 3) )
_UpperCAmelCase = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def lowercase__ ( self : Any )->Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def lowercase__ ( self : Any )->Dict:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowercase__ ( self : int )->Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : int )->Optional[int]:
_UpperCAmelCase = '''google/ddpm-cifar10-32'''
_UpperCAmelCase = UNetaDModel.from_pretrained(_a )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = DDIMPipeline(unet=_a , scheduler=_a )
ddim.to(_a )
ddim.set_progress_bar_config(disable=_a )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ddim(generator=_a , eta=0.0 , output_type='''numpy''' ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[int] )->Optional[Any]:
_UpperCAmelCase = '''google/ddpm-ema-bedroom-256'''
_UpperCAmelCase = UNetaDModel.from_pretrained(_a )
_UpperCAmelCase = DDIMScheduler.from_pretrained(_a )
_UpperCAmelCase = DDIMPipeline(unet=_a , scheduler=_a )
ddpm.to(_a )
ddpm.set_progress_bar_config(disable=_a )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ddpm(generator=_a , output_type='''numpy''' ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_UpperCAmelCase = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
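# Hedged note (added for clarity): both slow tests seed the RNG, generate one image,
# and compare a 3x3 corner slice of the last channel against stored reference values;
# the 1e-2 tolerance is loose enough to absorb minor GPU-kernel nondeterminism while
# still catching real output regressions.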
| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
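# Worked example (added for illustration; tree chosen arbitrarily): with all three
# coins on the root of a three-node tree, one coin must travel along each edge,
# so the minimum number of moves is 2.
if __name__ == "__main__":
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_tree) == 2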
| 326 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase_ = {'''facebook/blenderbot_small-90M''': 5_12}
def __lowercase ( __lowercase ) -> List[str]:
'''simple docstring'''
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
_A = set(__lowercase )
return pairs
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : str="__start__" , __UpperCAmelCase : Dict="__end__" , __UpperCAmelCase : str="__unk__" , __UpperCAmelCase : List[str]="__null__" , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
super().__init__(unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , **__UpperCAmelCase )
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
_A = json.load(__UpperCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
_A = merges_handle.read().split("\n" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = {}
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_A = re.sub("([.,!?()])" , R" \1" , __UpperCAmelCase )
_A = re.sub("(')" , R" \1 " , __UpperCAmelCase )
_A = re.sub(R"\s{2,}" , " " , __UpperCAmelCase )
if "\n" in token:
_A = token.replace("\n" , " __newln__" )
_A = token.split(" " )
_A = []
for token in tokens:
if not len(__UpperCAmelCase ):
continue
_A = token.lower()
_A = tuple(__UpperCAmelCase )
_A = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_A = get_pairs(__UpperCAmelCase )
if not pairs:
words.append(__UpperCAmelCase )
continue
while True:
_A = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__UpperCAmelCase ):
try:
_A = word.index(__UpperCAmelCase , __UpperCAmelCase )
new_word.extend(word[i:j] )
_A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__UpperCAmelCase )
_A = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
_A = get_pairs(__UpperCAmelCase )
_A = "@@ ".join(__UpperCAmelCase )
_A = word[:-4]
_A = word
words.append(__UpperCAmelCase )
return " ".join(__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = []
_A = re.findall(R"\S+\n?" , __UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
return split_tokens
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = token.lower()
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int ):
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = " ".join(__UpperCAmelCase ).replace("@@ " , "" ).strip()
return out_string
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
_A = 0
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_A = token_index
writer.write(" ".join(__UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
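# Hedged sketch (added for illustration): `__lowercase` above is the standard BPE
# adjacent-pair extractor. For a word split into symbols carrying the "</w>"
# end-of-word marker it returns the candidate merges that the merge loop ranks, e.g.
#
#   __lowercase(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}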
| 79 |
'''simple docstring'''


def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''simple docstring'''
    # closed form for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    '''simple docstring'''
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
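# Worked check (added for illustration): first_term=1, common_diff=1, num_of_terms=10
# gives 10 / 2 * (2 * 1 + 9 * 1) = 55.0, matching sum(range(1, 11)).
if __name__ == "__main__":
    assert sum_of_series(1, 1, 10) == 55.0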
| 168 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] ,lowerCamelCase__ : NestedDataStructureLike[PathLike] ,lowerCamelCase__ : Optional[NamedSplit] = None ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Union[str, Any] ,) -> str:
'''simple docstring'''
super().__init__(
lowerCamelCase__ ,split=lowerCamelCase__ ,features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = field
SCREAMING_SNAKE_CASE = path_or_paths if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE = Json(
cache_dir=lowerCamelCase__ ,data_files=lowerCamelCase__ ,features=lowerCamelCase__ ,field=lowerCamelCase__ ,**lowerCamelCase__ ,)
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,)
SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split=self.split ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory )
return dataset
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] ,lowerCamelCase__ : Dataset ,lowerCamelCase__ : Union[PathLike, BinaryIO] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : str ,) -> Optional[int]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = path_or_buf
SCREAMING_SNAKE_CASE = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE = num_proc
SCREAMING_SNAKE_CASE = """utf-8"""
SCREAMING_SNAKE_CASE = to_json_kwargs
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""path_or_buf""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""orient""" ,"""records""" )
SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""lines""" ,True if orient == """records""" else False )
SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""index""" ,False if orient in ["""split""", """table"""] else True )
SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""compression""" ,lowerCamelCase__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"""wb""" ,compression=lowerCamelCase__ ) as buffer:
SCREAMING_SNAKE_CASE = self._write(file_obj=lowerCamelCase__ ,orient=lowerCamelCase__ ,lines=lowerCamelCase__ ,index=lowerCamelCase__ ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
""" was passed. Please provide a local path instead.""" )
SCREAMING_SNAKE_CASE = self._write(
file_obj=self.path_or_buf ,orient=lowerCamelCase__ ,lines=lowerCamelCase__ ,index=lowerCamelCase__ ,**self.to_json_kwargs )
return written
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = args
SCREAMING_SNAKE_CASE = query_table(
table=self.dataset.data ,key=slice(lowerCamelCase__ ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
SCREAMING_SNAKE_CASE = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase__ ,orient=lowerCamelCase__ ,lines=lowerCamelCase__ ,index=lowerCamelCase__ ,**lowerCamelCase__ )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : BinaryIO ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ,) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating json from Arrow format""" ,):
SCREAMING_SNAKE_CASE = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,lowerCamelCase__ ,lowerCamelCase__ )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating json from Arrow format""" ,):
written += file_obj.write(lowerCamelCase__ )
return written
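# Hedged usage sketch (added for illustration): in the public `datasets` API these
# classes back `load_dataset("json", ...)` and `Dataset.to_json(...)`; the file names
# below are examples.
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")
#   ds.to_json("out.jsonl", lines=True)   # orient="records" + lines=True -> JSON Lines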
| 371 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ComputeEnvironment.AMAZON_SAGEMAKER
__snake_case : List[Any] = True
__snake_case : Optional[int] = "ml.p3.2xlarge"
__snake_case : List[str] = "accelerate_sagemaker_execution_role"
__snake_case : Tuple = "hf-sm"
__snake_case : Any = "us-east-1"
__snake_case : Union[str, Any] = 1
__snake_case : Dict = "accelerate-sagemaker-1"
__snake_case : Tuple = "1.6"
__snake_case : List[str] = "4.4"
__snake_case : str = "train.py"
__snake_case : List[str] = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
__snake_case : Optional[int] = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] ,lowerCamelCase__ )
assert isinstance(converted_args["""do_train"""] ,lowerCamelCase__ )
assert isinstance(converted_args["""epochs"""] ,lowerCamelCase__ )
assert isinstance(converted_args["""learning_rate"""] ,lowerCamelCase__ )
assert isinstance(converted_args["""max_steps"""] ,lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
 | 193 | 0 |
| 193 | 0 |
"""simple docstring"""
import torch
from torch import nn
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 , lowerCAmelCase__=False ) -> Dict:
super().__init__()
a : Union[str, Any] = n_token
a : List[str] = d_embed
a : Dict = d_proj
a : Tuple = cutoffs + [n_token]
a : Union[str, Any] = [0] + self.cutoffs
a : Optional[int] = div_val
a : Optional[int] = self.cutoffs[0]
a : int = len(self.cutoffs ) - 1
a : Tuple = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
a : Dict = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
a : int = nn.Parameter(torch.zeros(self.n_clusters ) )
a : int = nn.ModuleList()
a : Dict = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
a, a : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
a : Tuple = keep_order
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
if proj is None:
a : List[Any] = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
a : Union[str, Any] = nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
a : Tuple = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=False ) -> Tuple:
if labels is not None:
# Shift so that tokens < n predict n
a : str = hidden[..., :-1, :].contiguous()
a : Optional[int] = labels[..., 1:].contiguous()
a : Any = hidden.view(-1 , hidden.size(-1 ) )
a : List[str] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
a : int = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
a : List[str] = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
a : Any = labels != -100
a : Optional[int] = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
a : Union[str, Any] = (
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
a : Optional[int] = nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
a, a : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a, a : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a : Dict = self.out_layers[0].weight[l_idx:r_idx]
a : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
a : Optional[int] = self.out_layers[i].weight
a : Dict = self.out_layers[i].bias
if i == 0:
a : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
a, a, a : int = weights[0], biases[0], self.out_projs[0]
a : Optional[int] = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a : Optional[int] = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
a : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
a : Dict = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
a : List[Any] = 0
a : int = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
a, a : List[Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
a : Optional[int] = (labels >= l_idx) & (labels < r_idx)
a : List[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
a : List[str] = labels.index_select(0 , lowerCAmelCase_ ) - l_idx
a : Any = head_logprob.index_select(0 , lowerCAmelCase_ )
a : int = hidden.index_select(0 , lowerCAmelCase_ )
else:
a : str = hidden
if i == 0:
if labels is not None:
a : Optional[int] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
a : Optional[int] = head_logprob[:, : self.cutoffs[0]]
else:
a, a, a : List[Any] = weights[i], biases[i], self.out_projs[i]
a : Optional[Any] = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a : List[str] = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
a : Tuple = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
a : List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
a : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
a : int = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __a ( self , lowerCAmelCase__ ) -> List[str]:
if self.n_clusters == 0:
a : Tuple = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
a, a : Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a, a : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
a : Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
a : List[str] = self.out_layers[i].weight
a : Tuple = self.out_layers[i].bias
if i == 0:
a : List[str] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
a, a, a : Optional[Any] = weights[0], biases[0], self.out_projs[0]
a : Any = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a : Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
a : Dict = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
a : str = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
a, a : Optional[Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
a : Optional[int] = head_logprob[:, : self.cutoffs[0]]
else:
a, a, a : Optional[int] = weights[i], biases[i], self.out_projs[i]
a : str = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a : Tuple = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
a : str = head_logprob[:, -i] + tail_logprob_i
a : Optional[Any] = logprob_i
return out
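# Hedged note (added for clarity): this mirrors the projected adaptive softmax from
# Transformer-XL. Illustrative numbers: with n_token=10000 and cutoffs=[2000, 6000],
# self.cutoffs becomes [2000, 6000, 10000]; tokens in the frequent head [0, 2000)
# get direct logits, while the two rarer tail clusters are reached through extra
# cluster logits appended to the head, so that for a tail token
#   log p(token | h) = log p(cluster | h) + log p(token | cluster, h),
# with tail embeddings optionally shrunk by div_val**i to save parameters.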
| 105 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCAmelCase : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "sequence-classification"
def __init__( self : Optional[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
if type(lowerCAmelCase_) == dict:
lowercase_ = Namespace(**lowerCAmelCase_)
lowercase_ = glue_output_modes[hparams.task]
lowercase_ = glue_tasks_num_labels[hparams.task]
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , self.mode)
def _UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
return self.model(**lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ = outputs[0]
lowercase_ = self.trainer.lr_schedulers[0]["""scheduler"""]
lowercase_ = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.hparams
lowercase_ = processors[args.task]()
lowercase_ = processor.get_labels()
for mode in ["train", "dev"]:
lowercase_ = self._feature_file(lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowercase_ = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowercase_ = convert_examples_to_features(
lowerCAmelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase_)
torch.save(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False):
"""simple docstring"""
lowercase_ = """dev""" if mode == """test""" else mode
lowercase_ = self._feature_file(lowerCAmelCase_)
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
lowercase_ = torch.load(lowerCAmelCase_)
lowercase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , batch_size=lowerCAmelCase_ , shuffle=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ , lowercase_ = outputs[:2]
lowercase_ = logits.detach().cpu().numpy()
lowercase_ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowercase_ = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowercase_ = np.argmax(lowerCAmelCase_ , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = np.squeeze(lowerCAmelCase_)
lowercase_ = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , lowerCAmelCase_ , lowerCAmelCase_)}
lowercase_ = dict(results.items())
lowercase_ = results
return ret, preds_list, out_label_list
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : list):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase_ , lowerCAmelCase_)
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=lowerCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowerCAmelCase_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser()
add_generic_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = GLUETransformer.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase_ = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
lowercase_ = GLUETransformer(__lowerCAmelCase )
lowercase_ = generic_train(__lowerCAmelCase , __lowerCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase_ = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__lowerCAmelCase ) )
lowercase_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowerCAmelCase )
if __name__ == "__main__":
main()
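# Hedged usage sketch (added for illustration; the script name and flag values are
# assumptions, only the flags themselves come from the argument parsers above):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 \
#       --output_dir ./results --gpus 1 --do_predict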
| 136 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __A( a ):
snake_case_ = ['''image_processor''', '''tokenizer''']
snake_case_ = '''AutoImageProcessor'''
snake_case_ = '''AutoTokenizer'''
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
__a = self.image_processor
__a = False
def __call__( self , *_snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_snake_case , **_snake_case )
__a = kwargs.pop('''images''' , _snake_case )
__a = kwargs.pop('''text''' , _snake_case )
if len(_snake_case ) > 0:
__a = args[0]
__a = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__a = self.image_processor(_snake_case , *_snake_case , **_snake_case )
if text is not None:
__a = self.tokenizer(_snake_case , **_snake_case )
if text is None:
return inputs
elif images is None:
return encodings
else:
__a = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*_snake_case , **_snake_case )
@contextmanager
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
__a = True
__a = self.tokenizer
yield
__a = self.image_processor
__a = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False , _snake_case=None ) -> List[Any]:
'''simple docstring'''
if added_vocab is None:
__a = self.tokenizer.get_added_vocab()
__a = {}
while tokens:
__a = re.search(r'''<s_(.*?)>''' , _snake_case , re.IGNORECASE )
if start_token is None:
break
__a = start_token.group(1 )
__a = re.search(rF"""</s_{key}>""" , _snake_case , re.IGNORECASE )
__a = start_token.group()
if end_token is None:
__a = tokens.replace(_snake_case , '''''' )
else:
__a = end_token.group()
__a = re.escape(_snake_case )
__a = re.escape(_snake_case )
__a = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _snake_case , re.IGNORECASE )
if content is not None:
__a = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__a = self.tokenajson(_snake_case , is_inner_value=_snake_case , added_vocab=_snake_case )
if value:
if len(_snake_case ) == 1:
__a = value[0]
__a = value
else: # leaf nodes
__a = []
for leaf in content.split(r'''<sep/>''' ):
__a = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__a = leaf[1:-2] # for categorical special tokens
output[key].append(_snake_case )
if len(output[key] ) == 1:
__a = output[key][0]
__a = tokens[tokens.find(_snake_case ) + len(_snake_case ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_snake_case , added_vocab=_snake_case )
if len(_snake_case ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
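    # Hedged sketch (added for illustration; the tag names and values are invented):
    # `tokenajson` turns Donut-style tagged token strings back into nested dicts, e.g.
    #
    #   "<s_menu><s_name> latte</s_name><s_price> 4.50</s_price></s_menu>"
    #
    # parses to {"menu": {"name": "latte", "price": "4.50"}}, assuming neither leaf
    # is a registered special token of the form "<.../>" .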
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _snake_case , )
        return self.image_processor
 | 33 |
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
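# Worked check (added for illustration): for dims [30, 35, 15, 5, 10, 20, 25],
# the classic CLRS example, the minimum cost is 15125 scalar multiplications and the
# parenthesization printed is ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).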
if __name__ == "__main__":
    main()
 | 33 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=10 ) -> str:
"""simple docstring"""
__lowerCamelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any=10 ) -> Any:
"""simple docstring"""
__lowerCamelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(UpperCamelCase__ , 'schedule.bin' )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
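# Hedged note (added for clarity): the helper above deliberately checkpoints the
# scheduler halfway through (`state_dict()` -> torch.save -> torch.load ->
# `load_state_dict()`) and keeps stepping, so the recorded learning-rate curve only
# matches an uninterrupted run if scheduler state round-trips through serialization.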
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertAlmostEqual(lowerCamelCase__ , lowerCamelCase__ , delta=lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
__lowerCamelCase = criterion(lowerCamelCase__ , lowerCamelCase__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCamelCase__ , weight_decay=0.0 , relative_step=lowerCamelCase__ , scale_parameter=lowerCamelCase__ , warmup_init=lowerCamelCase__ , )
for _ in range(1_000 ):
__lowerCamelCase = criterion(lowerCamelCase__ , lowerCamelCase__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = nn.Linear(50 , 50 ) if is_torch_available() else None
snake_case_ = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
snake_case_ = 10
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Any:
'''simple docstring'''
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertAlmostEqual(lowerCamelCase__ , lowerCamelCase__ , delta=lowerCamelCase__ , msg=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__lowerCamelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wraps a scheduler's lr lambdas in a picklable callable so the schedule survives save/reload."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
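For reference, a minimal sketch of the two helpers the scheduler test above depends on. The real test module defines its own versions; the bodies below are an assumption kept only for illustration, in particular the save/reload round-trip halfway through stepping:

def unwrap_schedule(scheduler, num_steps=10):
    # Step the scheduler and record the learning rate at each step.
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    # Same as unwrap_schedule, but round-trip the scheduler state through a
    # checkpoint halfway, so schedules that break under pickling are caught.
    import os
    import tempfile

    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                scheduler.load_state_dict(torch.load(file_name))
    return lrs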
| 90 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by appending the eos token after each turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
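Typical usage of the fast tokenizer above; this downloads the checkpoint named in PRETRAINED_VOCAB_FILES_MAP, so it needs network access:

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
print(tokenizer.decode(ids))  # "Hello world"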
| 90 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
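The attribute_map above aliases legacy attribute names onto the standard ones, so reads and writes through either name hit the same field. A quick demonstration of that routing:

config = ErnieMConfig()
config.dropout = 0.3                              # routed to classifier_dropout
assert config.classifier_dropout == 0.3
assert config.num_classes == config.num_labels    # routed to num_labels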
| 362 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
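Note that, unlike BERT, XLNet appends its special tokens at the end of the sequence; a quick sketch of the layout (the vocab path is a placeholder, loading requires a real SentencePiece model file):

tokenizer = XLNetTokenizer("spiece.model")  # placeholder path
print(tokenizer.build_inputs_with_special_tokens([4, 5, 6]))
# -> [4, 5, 6, sep_token_id, cls_token_id]   (sep/cls come last)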
| 262 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional parameters for pruning/masking configuration."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
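MaskedBertConfig only adds the three pruning knobs on top of a standard BERT config; a sketch of instantiating it for movement pruning (the values are illustrative defaults, not a recommendation):

config = MaskedBertConfig(
    pruning_method="topK",  # keep the top-K scoring weights per matrix
    mask_init="constant",   # how the learned mask scores are initialized
    mask_scale=0.0,         # initial value for the mask scores
)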
| 99 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
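This one config drives both DPR encoder towers; at retrieval time passages are ranked by the inner product of question and passage embeddings. A shape-only sketch, with random tensors standing in for real encoder outputs:

import torch

question_emb = torch.randn(1, 768)      # hidden_size above
passage_embs = torch.randn(4, 768)      # 4 candidate passages
scores = question_emb @ passage_embs.T  # higher inner product = better match
best = scores.argmax(dim=-1)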
| 108 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
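A quick usage sketch of the two converters above, with random data standing in for model output (the function names follow the renames applied here):

import numpy as np
import torch

batch = torch.rand(2, 3, 64, 64) * 2 - 1  # fake [-1, 1] model output, NCHW
pil_images = pt_to_pil(batch)             # list of 2 RGB PIL images

gray = np.random.rand(1, 64, 64, 1).astype("float32")
pil_gray = numpy_to_pil(gray)             # grayscale path, mode "L"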
| 67 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]]) -> tuple[list[list[int]], list[list[int]]]:
    """Run an A* search from `init` to `goal` on `grid` and return (path, action)."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
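A design note on the open list above: cell.sort(); cell.reverse(); cell.pop() re-sorts on every expansion, which is O(n log n) per step. A binary heap does the same cheapest-first selection in O(log n); a minimal runnable sketch of the drop-in pattern:

import heapq

open_heap = [(0, 0, 0, 0)]                 # (f, g, x, y); heapq keeps the smallest f on top
heapq.heappush(open_heap, (5, 4, 1, 0))
f, g, x, y = heapq.heappop(open_heap)      # cheapest cell, no sort/reverse needed
print(f, g, x, y)                          # 0 0 0 0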
| 67 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
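The equivalence test above doubles as a usage recipe; loading a timm checkpoint as a multi-stage backbone looks like this (downloads weights, so network access is needed):

import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
pixel_values = torch.rand(1, 3, 224, 224)
feature_maps = backbone(pixel_values).feature_maps  # one map per requested stage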
| 5 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
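Outside the tests, the decorator is typically wrapped around a training loop so a CUDA OOM halves the batch size and retries; a minimal sketch:

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # On CUDA OOM, accelerate catches the error, halves batch_size and calls again.
    print(f"trying batch_size={batch_size}")
    ...

train()  # note: called with no arguments; batch_size is injected by the decorator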
| 265 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    """Creates a pair of `DataLoader`s for the glue/mrpc dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
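The TorchTracemalloc context manager above is reusable on its own for ad-hoc profiling; a sketch (requires a CUDA device):

import torch

with TorchTracemalloc() as tracemalloc:
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x
print(f"used: {tracemalloc.used} MB, peaked: {tracemalloc.peaked} MB")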
| 355 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
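End-to-end, the pipeline exercised by these tests is driven like this; it downloads the checkpoint named in the slow tests (large), so network access and ideally a GPU are assumed, and the parameter values are illustrative:

from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    audio_length_in_s=5.12,
).audios[0]
# audio is a 1-D numpy waveform at pipe.vocoder.config.sampling_rate Hz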
| 27 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type):
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type, **format_kwargs):
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
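# Illustrative usage (names match the registrations below): get_formatter("np")
# resolves the "np" alias to "numpy" and returns a NumpyFormatter instance, while
# get_formatter("pt") raises the stored unavailability error if torch is missing.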
| 228 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
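# Keys listed below live at the top level of the HF model (no "wav2vec2." prefix),
# so the mapping above is applied to them without that prefix.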
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
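# The label file is expected to hold one label token per line; line numbers become the
# ids, so a file containing "dog" then "cat" yields {0: "dog", 1: "cat"} (illustrative).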
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    # is_headless is accepted to match the call below; it is not used in this body
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
if config_path is not None:
a = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
a = WavaVecaConfig()
if is_seq_class:
a = read_txt_into_dict(__lowerCamelCase )
a = idalabel
a = WavaVecaForSequenceClassification(__lowerCamelCase )
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
a = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a = target_dict.pad_index
a = target_dict.bos_index
a = target_dict.eos_index
a = len(target_dict.symbols )
a = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = target_dict.indices
# fairseq has the <pad> and <s> switched
a = 0
a = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
a = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
a = True if config.feat_extract_norm == """layer""" else False
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
a = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
a = WavaVecaForCTC(__lowerCamelCase )
else:
a = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
a = argparse.Namespace(task="""audio_pretraining""" )
a = fairseq.tasks.setup_task(__lowerCamelCase )
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
a = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 228 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
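# Note: the qkv split above assumes the fused attention weight is laid out as
# [query; key; value] along dim 0, which is what this converter expects from the
# original LDM checkpoints.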
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowercase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowercase__ = {
layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the middle blocks only
lowercase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowercase__ = {
layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the output blocks only
lowercase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowercase__ = {
layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
lowercase__ = (i - 1) // (config["num_res_blocks"] + 1)
lowercase__ = (i - 1) % (config["num_res_blocks"] + 1)
lowercase__ = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
lowercase__ = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowercase__ = checkpoint[
f'''input_blocks.{i}.0.op.weight'''
]
lowercase__ = checkpoint[
f'''input_blocks.{i}.0.op.bias'''
]
continue
lowercase__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowercase__ = {"old": f'''input_blocks.{i}.0''', "new": f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowercase__ = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ):
lowercase__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowercase__ = {
"old": f'''input_blocks.{i}.1''',
"new": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase__ = {
f'''input_blocks.{i}.1.qkv.bias''': {
"key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''input_blocks.{i}.1.qkv.weight''': {
"key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , )
lowercase__ = middle_blocks[0]
lowercase__ = middle_blocks[1]
lowercase__ = middle_blocks[2]
lowercase__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowercase__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowercase__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowercase__ = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ = i // (config["num_res_blocks"] + 1)
lowercase__ = i % (config["num_res_blocks"] + 1)
lowercase__ = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]]
lowercase__ = {}
for layer in output_block_layers:
lowercase__ , lowercase__ = layer.split("." )[0], shave_segments(SCREAMING_SNAKE_CASE_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ = [layer_name]
if len(SCREAMING_SNAKE_CASE_ ) > 1:
lowercase__ = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
lowercase__ = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]
lowercase__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowercase__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowercase__ = {"old": f'''output_blocks.{i}.0''', "new": f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase__ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowercase__ = checkpoint[
f'''output_blocks.{i}.{index}.conv.weight'''
]
lowercase__ = checkpoint[
f'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(SCREAMING_SNAKE_CASE_ ) == 2:
lowercase__ = []
if len(SCREAMING_SNAKE_CASE_ ):
lowercase__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowercase__ = {
"old": f'''output_blocks.{i}.1''',
"new": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase__ = {
f'''output_blocks.{i}.1.qkv.bias''': {
"key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''output_blocks.{i}.1.qkv.weight''': {
"key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , )
else:
lowercase__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase__ = ".".join(["output_blocks", str(SCREAMING_SNAKE_CASE_ ), path["old"]] )
lowercase__ = ".".join(["up_blocks", str(SCREAMING_SNAKE_CASE_ ), "resnets", str(SCREAMING_SNAKE_CASE_ ), path["new"]] )
lowercase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
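    # Saving a full LDMPipeline needs scheduler and VQ-VAE configs next to the
    # checkpoint; when they are missing, the except branch saves just the UNet.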
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 224 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=5_1865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_0257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=5_0256,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 5_0256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 2_2050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 224 | 1 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """
    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1.5, 2.5], [3.5, 4.5])
    3.0
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
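# Note: the sort makes this O((m + n) log(m + n)); a two-pointer merge or a
# binary-search partition would bring the median lookup down to O(log(min(m, n))).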
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 22 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 326 | 0 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide-and-conquer maximum over nums[left:right + 1] (inclusive bounds).
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
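# Illustrative check: find_max([3, 1, 4, 1, 5], 0, 4) == 5; the recursion halves the
# range each call, so the depth is O(log n) while the total work stays O(n).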
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 350 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests without an explicit "integration"/"unit" marker as unit tests
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 277 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu_10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu_10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 45 |
import math
import random
def sigmoid_function(value, deriv=False):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
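# When deriv=True the argument is expected to already be sigmoid(x), since
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).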
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected, number_propagations):
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
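# Illustrative behaviour (not a guarantee): forward_propagation(32, 450_000)
# typically converges to a value close to 32.0 after enough propagations.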
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input("Expected value: "))
__UpperCAmelCase = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 299 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_lowerCAmelCase = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
_lowerCAmelCase = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_lowerCAmelCase = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_lowerCAmelCase = sorted(arg_to_scheduler.keys())
_lowerCAmelCase = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class _SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self : List[Any] , a__ : argparse.Namespace , a__ : Any=None , a__ : Tuple="base" , a__ : int=None , a__ : List[str]=None , a__ : Any=None , **a__ : List[str] , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(a__ )
__magic_name__ = 0
__magic_name__ = Path(self.hparams.output_dir )
__magic_name__ = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=a__ , **a__ , )
else:
__magic_name__ = config
__magic_name__ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , a__ , a__ ):
assert hasattr(self.config , a__ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , a__ , getattr(self.hparams , a__ ) )
if tokenizer is None:
__magic_name__ = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=a__ , )
else:
__magic_name__ = tokenizer
__magic_name__ = MODEL_MODES[mode]
if model is None:
__magic_name__ = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=a__ , )
else:
__magic_name__ = model
def snake_case__ ( self : Any , *a__ : Optional[Any] , **a__ : Any ):
__magic_name__ = self.model_type.from_pretrained(*a__ , **a__ )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def snake_case__ ( self : str ):
__magic_name__ = self.model
__magic_name__ = ['''bias''', '''LayerNorm.weight''']
__magic_name__ = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ = Adafactor(
a__ , lr=self.hparams.learning_rate , scale_parameter=a__ , relative_step=a__ )
else:
__magic_name__ = AdamW(
a__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ = optimizer
__magic_name__ = self.get_lr_scheduler()
return [optimizer], [scheduler]
def snake_case__ ( self : List[Any] , a__ : List[str] , a__ : List[str] ):
return self.validation_step(a__ , a__ )
def snake_case__ ( self : str , a__ : Any ):
return self.validation_end(a__ )
def snake_case__ ( self : List[str] ):
__magic_name__ = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def snake_case__ ( self : Union[str, Any] , a__ : List[Any] ):
if stage == "test":
__magic_name__ = len(self.test_dataloader().dataset )
else:
__magic_name__ = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=a__ )
__magic_name__ = len(self.train_dataloader().dataset )
def snake_case__ ( self : List[str] , a__ : str , a__ : int , a__ : bool = False ):
raise NotImplementedError('''You must implement this for your task''' )
def snake_case__ ( self : Any ):
return self.train_loader
def snake_case__ ( self : str ):
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=a__ )
def snake_case__ ( self : Dict ):
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=a__ )
def snake_case__ ( self : str , a__ : Any ):
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
a__ , list(filter(a__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def snake_case__ ( self : Optional[int] , a__ : Dict[str, Any] ):
__magic_name__ = self.output_dir.joinpath('''best_tfmr''' )
__magic_name__ = self.step_count
self.model.save_pretrained(a__ )
self.tokenizer.save_pretrained(a__ )
@staticmethod
def snake_case__ ( a__ : List[Any] , a__ : Any ):
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=a__ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(a__ ).parent / '''test_run''' / '''cache''' ) , type=a__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=a__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=a__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=a__ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=a__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=a__ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=a__ , metavar=a__ , type=a__ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=a__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=a__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=a__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=a__ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=a__ )
parser.add_argument('''--train_batch_size''' , default=32 , type=a__ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=a__ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class _SCREAMING_SNAKE_CASE ( pl.Callback ):
def snake_case__ ( self : str , a__ : Dict , a__ : List[Any] ):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _SCREAMING_SNAKE_CASE ( pl.Callback ):
def snake_case__ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple ):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(a__ )
class _SCREAMING_SNAKE_CASE ( pl.Callback ):
def snake_case__ ( self : Tuple , a__ : Dict , a__ : Optional[Any] ):
__magic_name__ = trainer.lr_schedulers[0]['''scheduler''']
__magic_name__ = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(a__ )
def snake_case__ ( self : Optional[int] , a__ : pl.Trainer , a__ : pl.LightningModule ):
rank_zero_info('''***** Validation results *****''' )
__magic_name__ = trainer.callback_metrics
# Log results
for key in sorted(a__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(a__ , str(metrics[key] ) ) )
def snake_case__ ( self : Union[str, Any] , a__ : pl.Trainer , a__ : pl.LightningModule ):
rank_zero_info('''***** Test results *****''' )
__magic_name__ = trainer.callback_metrics
# Log and save results to file
__magic_name__ = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(a__ , '''w''' ) as writer:
for key in sorted(a__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(a__ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(a__ , str(metrics[key] ) ) )
def UpperCamelCase ( a , a ) -> None:
'''simple docstring'''
parser.add_argument(
'''--output_dir''' , default=str(Path(a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=a , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=a )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=a , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=a , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def UpperCamelCase ( a , a , a=None , a=True , a=[] , a=None , a=None , **a , ) -> Union[str, Any]:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
__magic_name__ = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a )
if logging_callback is None:
__magic_name__ = LoggingCallback()
__magic_name__ = {}
if args.fpaa:
__magic_name__ = 16
if args.gpus > 1:
__magic_name__ = '''auto'''
__magic_name__ = '''ddp'''
__magic_name__ = args.accumulate_grad_batches
__magic_name__ = None
__magic_name__ = '''auto'''
__magic_name__ = pl.Trainer.from_argparse_args(
a , weights_summary=a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a , val_check_interval=1 , num_sanity_val_steps=2 , **a , )
if args.do_train:
trainer.fit(a )
else:
print('''RAG modeling tests with new set functions successfuly executed!''' )
return trainer
| 368 |
'''simple docstring'''
import functools
def edit_distance(worda: str, wordb: str) -> int:
    """
    Levenshtein distance via memoized top-down recursion.
    >>> edit_distance("intention", "execution")
    5
    """
    len_worda = len(worda)
    len_wordb = len(wordb)
    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )
    return min_distance(0, 0)
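# With memoization the recursion runs in O(len(worda) * len(wordb)) time and space,
# since each (indexa, indexb) pair is solved at most once.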
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_inference_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 33 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
@slow
    def test_inference_image_classification(self):
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 278 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 366
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCamelCase__ : Tuple = [start]
UpperCamelCase__ : Optional[int] = set(__lowerCAmelCase )
# Keep tab on distances from `start` node.
UpperCamelCase__ : str = {start: 0, target: -1}
while queue:
UpperCamelCase__ : Any = queue.pop(0 )
if node == target:
UpperCamelCase__ : Union[str, Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__lowerCAmelCase )
queue.append(__lowerCAmelCase )
UpperCamelCase__ : List[Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4 | 196 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __magic_name__ :
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Any = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A_ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
A_ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[int] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
A_ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
A_ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A_ : Any = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
A_ : Any = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
A_ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : int = self.get_dummy_components()
A_ : int = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Union[str, Any] = self.get_dummy_inputs(_a )
A_ : Optional[Any] = inputs["prompt"]
A_ : Any = inputs["generator"]
A_ : str = inputs["num_inference_steps"]
A_ : Optional[Any] = inputs["output_type"]
if "image" in inputs:
A_ : str = inputs["image"]
else:
A_ : int = None
if "mask_image" in inputs:
A_ : Optional[Any] = inputs["mask_image"]
else:
A_ : Union[str, Any] = None
if "original_image" in inputs:
A_ : Optional[int] = inputs["original_image"]
else:
A_ : Dict = None
A_ , A_ : List[str] = pipe.encode_prompt(_a )
# inputs with prompt converted to embeddings
A_ : List[Any] = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
A_ : List[str] = image
if mask_image is not None:
A_ : int = mask_image
if original_image is not None:
A_ : Optional[int] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_a , _a , _a )
A_ : Optional[Any] = pipe(**_a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_a )
A_ : List[str] = self.pipeline_class.from_pretrained(_a )
pipe_loaded.to(_a )
pipe_loaded.set_progress_bar_config(disable=_a )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_a , _a ) is None , f"`{optional_component}` did not stay set to None after loading." , )
A_ : Dict = self.get_dummy_inputs(_a )
A_ : Optional[Any] = inputs["generator"]
A_ : int = inputs["num_inference_steps"]
A_ : Tuple = inputs["output_type"]
# inputs with prompt converted to embeddings
A_ : Optional[int] = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
A_ : List[Any] = image
if mask_image is not None:
A_ : Optional[Any] = mask_image
if original_image is not None:
A_ : str = original_image
A_ : Union[str, Any] = pipe_loaded(**_a )[0]
A_ : Tuple = np.abs(to_np(_a ) - to_np(_a ) ).max()
self.assertLess(_a , 1e-4 )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[int] = self.get_dummy_components()
A_ : List[str] = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : List[Any] = self.get_dummy_inputs(_a )
A_ : Optional[int] = pipe(**_a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_a )
A_ : int = self.pipeline_class.from_pretrained(_a )
pipe_loaded.to(_a )
pipe_loaded.set_progress_bar_config(disable=_a )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A_ : Any = self.get_dummy_inputs(_a )
A_ : List[str] = pipe_loaded(**_a )[0]
A_ : Dict = np.abs(to_np(_a ) - to_np(_a ) ).max()
self.assertLess(_a , 1e-4 )
| 300 |
"""simple docstring"""
lowercase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowercase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowercase_ = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date, using the Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
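
# Added usage examples (not in the original module); both expected week days were
# verified against a calendar.
assert get_week_day(2020, 10, 24) == "Saturday"
assert get_week_day(2017, 10, 24) == "Tuesday"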
| 45 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __a , unittest.TestCase ):
UpperCamelCase__ : Tuple =LongformerTokenizer
UpperCamelCase__ : Any =True
UpperCamelCase__ : Dict =LongformerTokenizerFast
UpperCamelCase__ : List[str] =True
def __a ( self :Optional[Any]) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCAmelCase_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(UpperCamelCase__) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(UpperCamelCase__))
def __a ( self :Optional[int] , **_lowercase :str) -> Dict:
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def __a ( self :List[str] , **_lowercase :Optional[int]) -> Union[str, Any]:
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def __a ( self :List[str] , _lowercase :List[str]) -> Optional[Any]:
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = '''lower newer'''
return input_text, output_text
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCAmelCase_ = tokenizer.tokenize(UpperCamelCase__) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
UpperCAmelCase_ = tokens + [tokenizer.unk_token]
UpperCAmelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , UpperCamelCase__)
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase__) , [0, 31414, 232, 328, 2])
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase__) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''')
UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __a ( self :int) -> Dict:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = '''Encode this sequence.'''
UpperCAmelCase_ = tokenizer.byte_encoder[''' '''.encode('''utf-8''')[0]]
# Testing encoder arguments
UpperCAmelCase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__)
UpperCAmelCase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''})
UpperCAmelCase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__)
# Testing spaces after special tokens
UpperCAmelCase_ = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__)}) # mask token has a left space
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
UpperCAmelCase_ = '''Encode <mask> sequence'''
UpperCAmelCase_ = '''Encode <mask>sequence'''
UpperCAmelCase_ = tokenizer.encode(UpperCamelCase__)
UpperCAmelCase_ = encoded.index(UpperCamelCase__)
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
UpperCAmelCase_ = tokenizer.encode(UpperCamelCase__)
UpperCAmelCase_ = encoded.index(UpperCamelCase__)
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__)
def __a ( self :int) -> Dict:
pass
def __a ( self :Union[str, Any]) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
def __a ( self :Union[str, Any]) -> Optional[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
UpperCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase__)
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase__)
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase__)
def __a ( self :List[Any]) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
UpperCAmelCase_ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ = f"{text_of_1_token} {text_of_1_token}"
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__) + 1, len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__) + 1, len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__), len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__), len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__) + 1, 1 + len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__), 1 + len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__), 1 + len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
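
# Added usage sketch (not part of the upstream file): the class above is re-exported
# as `transformers.MaskFormerSwinConfig`. The deferred import keeps this illustrative
# helper from creating a circular import when the module is loaded as part of the
# `transformers` package.
def _demo_config_attributes():
    from transformers import MaskFormerSwinConfig

    config = MaskFormerSwinConfig()
    assert config.hidden_size == 768  # embed_dim * 2 ** (num_stages - 1) = 96 * 8
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]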
| 344 | 0 |
'''simple docstring'''
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of some common decimal values."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
| 58 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
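
# Added usage sketch (not part of the upstream file): exercising the config and its
# ONNX export companion defined above. The deferred import avoids a circular import
# if this module is loaded as part of the `transformers` package.
def _demo_longformer_onnx_config():
    from transformers import LongformerConfig

    config = LongformerConfig()
    onnx_config = LongformerOnnxConfig(config)
    # the tril operator used by Longformer requires opset >= 14
    assert onnx_config.default_onnx_opset >= 14
    assert "global_attention_mask" in onnx_config.inputs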
| 215 | 0 |
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Ensures all edge weights are distinct, as required by Boruvka's algorithm."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given set of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint set Union and Find for Boruvka's algorithm."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns a minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
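
# Added demo (not part of the original module): Boruvka's algorithm on a small
# weighted graph; the weights are already distinct, so `distinct_weight` is a no-op.
if __name__ == "__main__":
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[[0, 1, 1], [0, 2, 2], [2, 3, 3]])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)  # each MST edge is printed in both directions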
| 146 |
def compute_ap(l):  # noqa: E741
    """Find and print articulation points (cut vertices) of an undirected graph."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 146 | 1 |
'''simple docstring'''
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
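
# Added check (not in the original file): at x = mu the density equals
# 1 / sqrt(2 * pi * sigma**2), which is about 0.3989 for the standard normal.
assert abs(gaussian(0) - 0.3989422804014327) < 1e-12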
| 272 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """
    For each index, compute the length of the longest substring starting there
    that is also a prefix of the whole string.
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
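
# Added examples (not in the original file), worked out by hand from the definitions
# above.
assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abr", "abracadabra") == 2  # matches at indices 0 and 7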
| 272 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding='utf-8', check=A, )
assert hasattr(self, 'env' )
def UpperCamelCase_ ( self, A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"{self.env.base_job_name}-single", instance_count=A, instance_type=self.instance_type, debugger_hook_config=A, hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version='py36', )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
TrainingJobAnalytics(A ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds', 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json", 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, A )
| 371 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _a :
'''simple docstring'''
A : Tuple = BlenderbotSmallConfig
A : Optional[int] = {}
A : Any = '''gelu'''
def __init__( self, A, A=13, A=7, A=True, A=False, A=99, A=32, A=2, A=4, A=37, A=0.1, A=0.1, A=20, A=2, A=1, A=0, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
SCREAMING_SNAKE_CASE : List[str] = pad_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = bos_token_id
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE : str = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE : List[str] = prepare_blenderbot_small_inputs_dict(A, A, A )
return config, inputs_dict
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFBlenderbotSmallModel(config=A ).get_decoder()
SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE : List[Any] = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE : Dict = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE : int = 1
# first forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, attention_mask=A, head_mask=A, use_cache=A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE : str = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE : Any = model(A, attention_mask=A )[0]
SCREAMING_SNAKE_CASE : List[str] = model(A, attention_mask=A, past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A, A, rtol=1E-3 )
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: int ,__UpperCamelCase: Optional[Any]=None ,__UpperCamelCase: List[str]=None ,__UpperCamelCase: int=None ,__UpperCamelCase: Any=None ,__UpperCamelCase: Union[str, Any]=None ,):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__UpperCamelCase ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Any = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[str] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
A : List[str] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
A : List[str] = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
A : int = True
A : Optional[int] = False
A : str = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
A : List[Any] = '''facebook/blenderbot_small-90M'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer(self.src_text, return_tensors='tf' )
SCREAMING_SNAKE_CASE : int = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=A, )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 246 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """A fast byte-level BPE tokenizer for Blenderbot-90M, backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
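
# Added usage sketch (not part of the upstream file): loading the fast tokenizer
# through the public API and round-tripping a sentence. The deferred import keeps this
# illustrative helper from creating a circular import; it also needs network access to
# download the checkpoint named in the pretrained map above.
def _demo_tokenizer_roundtrip():
    from transformers import BlenderbotSmallTokenizerFast

    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    ids = tokenizer("sample text")["input_ids"]
    print(tokenizer.decode(ids))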
| 21 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] =ort.SessionOptions()
UpperCAmelCase : Optional[int] =False
return options
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
UpperCAmelCase : Optional[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
UpperCAmelCase : List[str] =OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Dict ='''A red cat sitting on a park bench'''
UpperCAmelCase : int =np.random.RandomState(0 )
UpperCAmelCase : Any =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='''np''' , )
UpperCAmelCase : Dict =output.images
UpperCAmelCase : Optional[int] =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
UpperCAmelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase : int =OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Union[str, Any] ='''A red cat sitting on a park bench'''
UpperCAmelCase : int =np.random.RandomState(0 )
UpperCAmelCase : str =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='''np''' , )
UpperCAmelCase : Dict =output.images
UpperCAmelCase : int =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 348 | 0 |
'''simple docstring'''
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
lowercase__ = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
lowercase__ = 'SGLCPQWZHKXAREONTFBVIYJUDM'
lowercase__ = 'HVSICLTYKQUBXDWAJZOMFGPREN'
lowercase__ = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
lowercase__ = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
lowercase__ = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
'''simple docstring'''
if (unique_rotsel := len(set(lowerCamelCase_ ) )) < 3:
snake_case : Any = F'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(lowerCamelCase_ )
# Checks if rotor positions are valid
snake_case : Union[str, Any] = rotpos
if not 0 < rotorposa <= len(lowerCamelCase_ ):
snake_case : List[Any] = F'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(lowerCamelCase_ )
if not 0 < rotorposa <= len(lowerCamelCase_ ):
snake_case : Optional[int] = F'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(lowerCamelCase_ )
if not 0 < rotorposa <= len(lowerCamelCase_ ):
snake_case : Union[str, Any] = F'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(lowerCamelCase_ )
# Validates string and returns dict
snake_case : str = _plugboard(lowerCamelCase_ )
return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Turns a string of letter pairs into a symmetric plugboard mapping."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    if len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    if pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # strip spaces; the result must be assigned back

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        if i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        tmppbl.add(i)
    del tmppbl

    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
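
# Illustrative example (not from the original): each pair is wired both ways,
# so the resulting mapping is symmetric:
#   _plugboard("ABCD")  -> {"A": "B", "B": "A", "C": "D", "D": "C"}
#   _plugboard("AB CD") -> the same dict, since spaces are stripped first.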
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Enciphers ``text``; run again with identical settings to decipher."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor r1 --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor r2 --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor r3 --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')
        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)  # any three *unique* rotors pass _validator
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 355 |
"""Export a BART model plus beam search to an ONNX graph and validate it against PyTorch."""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    """Parses the command-line arguments for the exporter."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
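
# Illustrative invocation (script name assumed, not part of the original):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx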
def load_model_tokenizer(model_name, device="cpu"):
    """Loads model and tokenizer, then applies BART-specific generation overrides."""
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
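
# Illustrative usage (mirrors the call made in main() below):
#   model, tokenizer = load_model_tokenizer("facebook/bart-base", "cpu")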
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Exports the scripted beam-search model to ONNX and checks it against a PyTorch run."""
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
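
# Note (assumption about the toolchain, not stated in the original): the
# `example_outputs` keyword was accepted by torch.onnx.export only in older
# PyTorch releases (roughly pre-1.10) and has since been removed, so this
# export path presumes such a version; on newer PyTorch the argument would
# need to be dropped.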
def main():
    """Drives the export: parses args, loads the model, and exports/validates it."""
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 83 | 0 |