| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is included.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
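
# --- Added usage sketch (not part of the dataset row) ----------------------
# A minimal example of how a dataclass like the one above is typically
# consumed. `HfArgumentParser` is real transformers API; the flag values
# below are illustrative assumptions, not taken from this row.
from transformers import HfArgumentParser

arg_parser = HfArgumentParser(SageMakerTrainingArguments)
(training_args,) = arg_parser.parse_args_into_dataclasses(
    args=["--output_dir", "/tmp/out", "--per_device_train_batch_size", "8"]
)
print(training_args.device, training_args.world_size)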
| 107 |
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# NOTE: the assignment target and class/method names were obfuscated in this
# dump; they are reconstructed here from the upstream diffusers test file.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 107 | 1 |
import math


def decimal_to_octal(num: int) -> str:
    """Convert a positive decimal integer to its octal (base-8) string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any

    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main():
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
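
# --- Added sanity check (not part of the dataset row) -----------------------
# The loop above builds the octal digits into a base-10 accumulator: the digit
# produced at step i contributes digit * 10**i, so 65 -> 1*100 + 0*10 + 1 = 101.
# Comparing against Python's built-in oct() confirms the construction.
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n), (n, decimal_to_octal(n), oct(n))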
| 377 |
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model",
)
parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="Number of processes to use for preprocessing."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)

    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and can make the
    # truncation of the context fail, so we remove that left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 377 | 1 |
from heapq import heappop, heappush

import numpy as np


# NOTE: the function name was obfuscated in this dump; `dijkstra` is a
# reconstructed, descriptive name.
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return the shortest distance from `source` to `destination` in a binary
    grid (cells equal to 1 are walkable, each move costs 1), plus the path."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
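
# --- Added usage sketch (not part of the dataset row) -----------------------
# Cells equal to 1 are walkable and every move costs 1, so the shortest route
# around the blocked cells in the middle row below takes four steps.
demo_grid = np.array(
    [
        [1, 1, 1],
        [0, 1, 0],
        [1, 1, 1],
    ]
)
cost, path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
print(cost)  # 4.0
print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]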
| 303 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


# NOTE: the class and attribute names were obfuscated in this dump; the names
# below are reconstructed from the upstream custom-tokenizer test files.
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 303 | 1 |
"""simple docstring"""
from collections import defaultdict
class lowerCAmelCase :
def __init__( self , a__ , a__ ):
_UpperCAmelCase = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_UpperCAmelCase = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(a__ ) )
]
_UpperCAmelCase = defaultdict(a__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_UpperCAmelCase = (1 << len(a__ )) - 1
def __A ( self , a__ , a__ ):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
_UpperCAmelCase = self.count_ways_until(a__ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_UpperCAmelCase = total_ways_util
return self.dp[mask][task_no]
def __A ( self , a__ ):
# Store the list of persons for each task
for i in range(len(a__ ) ):
for j in task_performed[i]:
self.task[j].append(a__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
lowerCAmelCase_ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowerCAmelCase_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
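
# --- Added brute-force cross-check (not part of the dataset row) ------------
# Each person picks one task from their own list; an assignment is valid when
# all picked tasks are distinct. Direct enumeration confirms the DP answer (10).
from itertools import product

_check_tasks = [[1, 3, 4], [1, 2, 5], [3, 4]]
assert sum(1 for combo in product(*_check_tasks) if len(set(combo)) == len(combo)) == 10
assert AssignmentUsingBitmask(_check_tasks, 5).count_no_of_ways(_check_tasks) == 10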
| 494 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 494 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
import torch

from diffusers import DiffusionPipeline


# NOTE: the class, method, and variable names were obfuscated in this dump;
# the descriptive names below are reconstructed.
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # Deliberately a no-op on the scheduler output: returns a tensor of ones
        # with the same shape (this kind of file is used to test custom-pipeline loading).
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
| 307 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
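
# --- Added usage sketch (not part of the dataset row) -----------------------
# `attribute_map` lets the generic config names used across transformers
# (hidden_size, num_hidden_layers, ...) resolve to CTRL's native attributes.
# This assumes the module is imported from within the transformers package,
# since the relative imports above only work there.
config = CTRLConfig()
assert config.hidden_size == config.n_embd == 1280
assert config.num_hidden_layers == config.n_layer == 48
assert config.max_position_embeddings == config.n_positions == 256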
| 714 |
"""Convert OpenAI GPT checkpoint."""

import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 564 | 0 |
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 60 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


# NOTE: class and test-method names were obfuscated in this dump; they are
# reconstructed here from the upstream ByT5 tokenizer test file.
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on

        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in the common tests
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 351 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
_lowerCAmelCase = "levit"
def __init__(self , _lowercase=224 , _lowercase=3 , _lowercase=3 , _lowercase=2 , _lowercase=1 , _lowercase=16 , _lowercase=[128, 256, 384] , _lowercase=[4, 8, 12] , _lowercase=[4, 4, 4] , _lowercase=[16, 16, 16] , _lowercase=0 , _lowercase=[2, 2, 2] , _lowercase=[2, 2, 2] , _lowercase=0.02 , **_lowercase , ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
__a : int = image_size
__a : Optional[int] = num_channels
__a : str = kernel_size
__a : Union[str, Any] = stride
__a : Dict = padding
__a : Optional[int] = hidden_sizes
__a : Any = num_attention_heads
__a : List[str] = depths
__a : Any = key_dim
__a : Union[str, Any] = drop_path_rate
__a : str = patch_size
__a : Tuple = attention_ratio
__a : Optional[Any] = mlp_ratio
__a : Dict = initializer_range
__a : List[str] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
_lowerCAmelCase = version.parse("1.11" )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 1e-4
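
# --- Added usage sketch (not part of the dataset row) -----------------------
# The ONNX export config above declares a single dynamically-shaped
# pixel_values input and a 1e-4 validation tolerance. Instantiating it only
# needs a model config (OnnxConfig also accepts an optional task argument);
# this sketch assumes it runs inside the transformers package.
config = LevitConfig()
onnx_config = LevitOnnxConfig(config)
print(list(onnx_config.inputs))         # ['pixel_values']
print(onnx_config.atol_for_validation)  # 0.0001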
| 710 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : int = image_size
__a : Tuple = patch_size
__a : str = num_channels
__a : Union[str, Any] = is_training
__a : List[Any] = use_labels
__a : int = hidden_size
__a : Optional[Any] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Dict = intermediate_size
__a : str = hidden_act
__a : Dict = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Optional[int] = type_sequence_label_size
__a : Dict = initializer_range
__a : Dict = encoder_stride
__a : int = num_attention_outputs
__a : List[Any] = embed_dim
__a : Optional[Any] = embed_dim + 1
__a : Optional[Any] = resolution
__a : Optional[Any] = depths
__a : Union[str, Any] = hidden_sizes
__a : List[str] = dim
__a : Any = mlp_expansion_ratio
def lowerCAmelCase__(self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__(self ):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
__a : List[Any] = model(_lowercase , training=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[Any] = self.type_sequence_label_size
__a : Any = TFEfficientFormerForImageClassification(_lowercase )
__a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a : Optional[Any] = 1
__a : int = TFEfficientFormerForImageClassification(_lowercase )
__a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : str = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
self.model_tester = TFEfficientFormerModelTester(self )
self.config_tester = ConfigTester(
self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.assertIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
'''simple docstring'''
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def lowerCAmelCase__(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFEfficientFormerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
__a : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__(self ):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__a : Optional[Any] = self.default_image_processor
__a : List[str] = prepare_img()
__a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
__a : Optional[Any] = model(**_lowercase , training=_lowercase )
# verify the logits
__a : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
__a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__a : Any = self.default_image_processor
__a : str = prepare_img()
__a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
__a : List[Any] = model(**_lowercase , training=_lowercase )
# verify the logits
__a : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
__a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig ( PretrainedConfig ):
model_type = '''encodec'''
def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , )-> str:
"""simple docstring"""
self.target_bandwidths = target_bandwidths
self.sampling_rate = sampling_rate
self.audio_channels = audio_channels
self.normalize = normalize
self.chunk_length_s = chunk_length_s
self.overlap = overlap
self.hidden_size = hidden_size
self.num_filters = num_filters
self.num_residual_layers = num_residual_layers
self.upsampling_ratios = upsampling_ratios
self.norm_type = norm_type
self.kernel_size = kernel_size
self.last_kernel_size = last_kernel_size
self.residual_kernel_size = residual_kernel_size
self.dilation_growth_rate = dilation_growth_rate
self.use_causal_conv = use_causal_conv
self.pad_mode = pad_mode
self.compress = compress
self.num_lstm_layers = num_lstm_layers
self.trim_right_ratio = trim_right_ratio
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**kwargs )
@property
def chunk_length( self : Any )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def chunk_stride( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def frame_rate( self : List[str] )-> int:
"""simple docstring"""
hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def num_quantizers( self : List[Any] )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
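# Hedged sanity check of the derived properties above: with the default 24 kHz
# sampling rate and upsampling ratios [8, 5, 4, 2], the hop length is
# 8 * 5 * 4 * 2 = 320 samples, so frame_rate = ceil(24000 / 320) = 75 Hz.
assert math.ceil(2_4000 / (8 * 5 * 4 * 2)) == 75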
| 85 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax( outputs ):
"""simple docstring"""
maxes = np.max(outputs,axis=-1,keepdims=True )
shifted_exp = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1,keepdims=True )
class PairClassificationPipeline ( Pipeline ):
'''simple docstring'''
def _sanitize_parameters ( self : Optional[Any] , **kwargs : List[str] ) -> List[str]:
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs['second_text'] = kwargs['second_text']
return preprocess_kwargs, {}, {}
def preprocess ( self : str , text : Union[str, Any] , second_text : Optional[int]=None ) -> int:
return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
def _forward ( self : Dict , model_inputs : List[str] ) -> Tuple:
return self.model(**model_inputs )
def postprocess ( self : str , model_outputs : Dict ) -> Optional[int]:
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits )
best_class = np.argmax(probabilities )
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 186 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian ( string_aa : bytes ):
if len(string_aa ) != 3_2:
raise ValueError("Input must be of length 32" )
little_endian = B""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex ( i : int ):
if i < 0:
raise ValueError("Input must be non-negative" )
hex_rep = format(i , "08x" )[-8:]
little_endian_hex = B""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def preprocess ( message : bytes ):
bit_string = B""
for char in message:
bit_string += format(char , "08b" ).encode("utf-8" )
start_len = format(len(bit_string ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(bit_string ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def get_block_words ( bit_string : bytes ):
if len(bit_string ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(bit_string ) , 5_1_2 ):
block = bit_string[pos : pos + 5_1_2]
block_words = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def not_aa ( i : int ):
if i < 0:
raise ValueError("Input must be non-negative" )
i_str = format(i , "032b" )
new_str = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(new_str , 2 )
def sum_aa ( a : int , b : int ):
return (a + b) % 2**3_2
def left_rotate_aa ( i : int , shift : int ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def md5_me ( message : bytes ):
bit_string = preprocess(message )
added_consts = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
aa = 0X67_452_301
ba = 0XEF_CDA_B89
ca = 0X98_BAD_CFE
da = 0X10_325_476
shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(bit_string ):
a = aa
b = ba
c = ca
d = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
f = d ^ (b & (c ^ d))
g = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
f = c ^ (d & (b ^ c))
g = (5 * i + 1) % 1_6
elif i <= 4_7:
f = b ^ c ^ d
g = (3 * i + 5) % 1_6
else:
f = c ^ (b | not_aa(d ))
g = (7 * i) % 1_6
f = (f + a + added_consts[i] + block_words[g]) % 2**3_2
a = d
d = c
c = b
b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
# Add hashed chunk to running total
aa = sum_aa(aa , a )
ba = sum_aa(ba , b )
ca = sum_aa(ca , c )
da = sum_aa(da , d )
digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
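# Hedged cross-check for the reconstructed implementation above (a sketch, not
# part of the original module): the digest must agree with hashlib's MD5.
import hashlib

assert md5_me(b"abc") == hashlib.md5(b"abc").hexdigest().encode("utf-8")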
| 111 |
import re
def split_input ( str_ : str ):
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def to_simple_case ( str_ : str ):
string_split = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case ( text : str , upper : bool , separator : str ):
try:
string_split = split_input(text )
if upper:
res_str = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
res_str = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def to_pascal_case ( text : str ):
return to_simple_case(text )
def to_camel_case ( text : str ):
try:
res_str = to_simple_case(text )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def to_snake_case ( text : str , upper : bool ):
return to_complex_case(text , upper , "_" )
def to_kebab_case ( text : str , upper : bool ):
return to_complex_case(text , upper , "-" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 111 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Tuple = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
model_type = """resnet"""
layer_types = ["""basic""", """bottleneck"""]
def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ) -> Tuple:
super().__init__(**kwargs )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.downsample_in_first_stage = downsample_in_first_stage
self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
self._out_features , self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig ( OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def inputs ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def atol_for_validation ( self : str ) -> float:
return 1e-3
| 53 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
def __init__( self : Dict , lowercase_ : str , lowercase_ : Union[str, Any]=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Tuple=True , lowercase_ : int=True , lowercase_ : Dict=True , lowercase_ : Any=True , lowercase_ : Union[str, Any]=99 , lowercase_ : Tuple=[1, 1, 2] , lowercase_ : List[Any]=1 , lowercase_ : int=32 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=8 , lowercase_ : Union[str, Any]=37 , lowercase_ : Union[str, Any]="gelu_new" , lowercase_ : str=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=512 , lowercase_ : int=3 , lowercase_ : Dict=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : Dict=4 , lowercase_ : List[str]=None , lowercase_ : Tuple=False , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = block_sizes
snake_case_ = num_decoder_layers
snake_case_ = d_model
snake_case_ = n_head
snake_case_ = d_head
snake_case_ = d_inner
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = 2
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = initializer_std
# Used in the tests to check the size of the first attention layer
snake_case_ = n_head
# Used in the tests to check the size of the first hidden state
snake_case_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
snake_case_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
snake_case_ = self.num_hidden_layers + 2
def A_ ( self : Union[str, Any] ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def A_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , ):
snake_case_ = TFFunnelModel(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def A_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : List[Any] , ):
snake_case_ = TFFunnelBaseModel(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelBaseModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelBaseModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def A_ ( self : Any , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Union[str, Any] , ):
snake_case_ = TFFunnelForPreTraining(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , ):
snake_case_ = TFFunnelForMaskedLM(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Dict , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , ):
snake_case_ = self.num_labels
snake_case_ = TFFunnelForSequenceClassification(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : int , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
snake_case_ = self.num_choices
snake_case_ = TFFunnelForMultipleChoice(config=lowercase_ )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Any , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , ):
snake_case_ = self.num_labels
snake_case_ = TFFunnelForTokenClassification(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , ):
snake_case_ = TFFunnelForQuestionAnswering(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Any ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def A_ ( self : int ):
self.model_tester = TFFunnelModelTester(self )
self.config_tester = ConfigTester(self , config_class=FunnelConfig )
def A_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def A_ ( self : str ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def A_ ( self : List[Any] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest ( TFModelTesterMixin , unittest.TestCase ):
snake_case_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case_ = False
snake_case_ = False
def A_ ( self : Union[str, Any] ):
self.model_tester = TFFunnelModelTester(self , base=True )
self.config_tester = ConfigTester(self , config_class=FunnelConfig )
def A_ ( self : Dict ):
self.config_tester.run_common_tests()
def A_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*config_and_inputs )
def A_ ( self : Union[str, Any] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def A_ ( self : str ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 640 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.0_2 , lowerCamelCase=4 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_attention_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_choices
def A_ ( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A_ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def A_ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
_A : Union[str, Any] = True
_A : List[str] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A_ ( self ):
self.model_tester = FlaxRobertaModelTester(self )
@slow
def A_ ( self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
| 710 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory ( _ ):
return EnvironmentCommand()
def download_command_factory ( args ):
return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand ( BaseTransformersCLICommand ):
@staticmethod
def register_subcommand ( parser ):
download_parser = parser.add_parser("env" )
download_parser.set_defaults(func=info_command_factory )
download_parser.add_argument(
"--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=download_command_factory )
def __init__( self , accelerate_config_file , *args ):
self._accelerate_config_file = accelerate_config_file
def run( self ):
snake_case__ = "not installed"
if is_safetensors_available():
import safetensors
snake_case__ = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
snake_case__ = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
snake_case__ = "not installed"
snake_case__ = snake_case__ = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
accelerate_version = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
accelerate_config_str = (
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(accelerate_config , dict )
else F"""\t{accelerate_config}"""
)
snake_case__ = "not installed"
snake_case__ = "NA"
if is_torch_available():
import torch
snake_case__ = torch.__version__
snake_case__ = torch.cuda.is_available()
snake_case__ = "not installed"
snake_case__ = "NA"
if is_tf_available():
import tensorflow as tf
snake_case__ = tf.__version__
try:
# deprecated in v2.1
snake_case__ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
snake_case__ = bool(tf.config.list_physical_devices("GPU" ) )
snake_case__ = "not installed"
snake_case__ = "not installed"
snake_case__ = "not installed"
snake_case__ = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
flax_version = flax.__version__
jax_version = jax.__version__
jaxlib_version = jaxlib.__version__
jax_backend = jax.lib.xla_bridge.get_backend().platform
info = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F"""{safetensors_version}""",
"Accelerate version": F"""{accelerate_version}""",
"Accelerate config": F"""{accelerate_config_str}""",
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": F"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": F"""{flax_version} ({jax_backend})""",
"Jax version": F"""{jax_version}""",
"JaxLib version": F"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(info ) )
return info
@staticmethod
def format_dict( d ):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 530 | 0 |
import torch
from diffusers import DiffusionPipeline
class UnetSchedulerOneForwardPipeline ( DiffusionPipeline ):
def __init__( self , unet , scheduler ):
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
def __call__( self ):
image = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
timestep = 1
model_output = self.unet(image , timestep ).sample
scheduler_output = self.scheduler.step(model_output , timestep , image ).prev_sample
result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
return result
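# Hedged usage sketch (the class follows the diffusers community
# "one_step_unet" example; the UNet hyper-parameters below are illustrative):
# from diffusers import DDPMScheduler, UNet2DModel
# pipe = UnetSchedulerOneForwardPipeline(
#     unet=UNet2DModel(sample_size=8, in_channels=3, out_channels=3),
#     scheduler=DDPMScheduler(),
# )
# output = pipe()  # a (1, 3, 8, 8) tensor of ones by construction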
| 619 |
from __future__ import annotations
from math import pi
def ind_reactance (inductance : float , frequency : float , reactance : float ):
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
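# Hedged worked example: a 10 mH inductor driven at 50 Hz has reactance
# X_L = 2 * pi * f * L = 2 * pi * 50 * 0.01 ≈ 3.14159 ohm (illustrative values).
from math import isclose

assert isclose(ind_reactance(10e-3, 50, 0)["reactance"], pi)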
| 619 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq ( number : int ):
sq = int(number**0.5 )
return number == sq * sq
def add_three ( x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int ):
top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
bottom = x_den * y_den * z_den
hcf = gcd(top , bottom )
top //= hcf
bottom //= hcf
return top, bottom
def solution ( order : int = 35 ):
unique_s: set = set()
hcf: int
total: Fraction = Fraction(0 )
fraction_sum: tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
z_num = x_num * y_den + x_den * y_num
z_den = x_den * y_den
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=2
z_num = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
z_den = x_den * x_den * y_den * y_den
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=-1
z_num = x_num * y_num
z_den = x_den * y_num + x_num * y_den
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=-2
z_num = x_num * x_num * y_num * y_num
z_den = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
for num, den in unique_s:
total += Fraction(num , den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
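# The search above depends on exact rational arithmetic: Fraction keeps every
# intermediate sum exact where floats would accumulate rounding error, e.g.:
assert Fraction(1, 2) + Fraction(1, 3) == Fraction(5, 6)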
| 152 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance ( resistance : float , reactance : float , impedance : float ):
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(SCREAMING_SNAKE_CASE , 2 ) - pow(SCREAMING_SNAKE_CASE , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(SCREAMING_SNAKE_CASE , 2 ) - pow(SCREAMING_SNAKE_CASE , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(SCREAMING_SNAKE_CASE , 2 ) + pow(SCREAMING_SNAKE_CASE , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
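# Hedged worked example: R = 3 ohm and X = 4 ohm give |Z| = sqrt(9 + 16) = 5 ohm
# (the classic 3-4-5 triangle; values are illustrative):
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}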
| 152 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__UpperCAmelCase = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : str ):
'''simple docstring'''
self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
check_copies.DIFFUSERS_PATH = self.diffusers_dir
shutil.copy(
os.path.join(git_repo_path ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
check_copies.DIFFUSERS_PATH = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def __lowercase ( self ,comment ,class_name ,class_code ,overwrite_result=None ):
'''simple docstring'''
code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
black_mode = black.Mode(target_versions={black.TargetVersion.PY35} ,line_length=119 )
code = black.format_str(code ,mode=black_mode )
fname = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(fname ,"""w""" ,newline="""\n""" ) as f:
f.write(code )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=True )
with open(fname ,"""r""" ) as f:
self.assertTrue(f.read() ,expected )
def __lowercase ( self : int ):
'''simple docstring'''
code = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(code ,REFERENCE_CODE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,A ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,A ) ,)
# Copy consistency with a really long name
long_class_name = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" ,f"{long_class_name}SchedulerOutput" ,re.sub("""Bert""" ,A ,A ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,A ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,A ) ,)
| 65 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum ( nums : Sequence[int] | None = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
ans = nums[0]
for i in range(1 , len(nums ) ):
num = nums[i]
ans = max(num , ans + num , ans )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase = int(input("""Enter number of elements : """).strip())
lowerCamelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
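# The loop above is Kadane's algorithm: O(n) time, O(1) extra space. Hedged
# check on an illustrative input whose best subarray is [4, -1, 2, 1]:
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6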
| 474 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor ( ProcessorMixin ):
attributes = ["""image_processor""", """tokenizer"""]
image_processor_class = """ChineseCLIPImageProcessor"""
tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , FutureWarning , )
feature_extractor = kwargs.pop("""feature_extractor""" )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def UpperCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class | 717 |
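
# Usage sketch for the processor above (hedged: the checkpoint name is
# illustrative and the example assumes PIL plus a local image file):
#
#   from PIL import Image
#   from transformers import ChineseCLIPProcessor
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫", "一只狗"], images=Image.open("cat.jpg"),
#                      padding=True, return_tensors="pt")
#   # -> BatchEncoding with input_ids / attention_mask / token_type_ids / pixel_values
| 717 |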
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
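
# Usage sketch for the pipeline above (hedged: the model name is illustrative):
#
#   from transformers import pipeline
#
#   extractor = pipeline(task="feature-extraction", model="bert-base-uncased")
#   features = extractor("Transformers is great!")
#   # -> nested list of floats with shape (1, sequence_length, hidden_size)
| 175 | 0 |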
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """RegNet shortcut, used to project the residual to the correct size (downsampling with `stride` if needed)."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer (https://arxiv.org/abs/1709.01507)."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's layer composed of three 3x3 grouped convolutions, same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
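
# Inference sketch for the classification model above (hedged: requires network
# access for the checkpoint and PIL for image loading; "facebook/regnet-y-040"
# is the checkpoint already referenced in the docstrings):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor, RegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(Image.open("cat.jpg"), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])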
| 84 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
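
# To run this module locally (a sketch; the path follows the usual transformers
# test layout, and RUN_SLOW=1 enables the @slow integration test above):
#
#   RUN_SLOW=1 pytest tests/models/speecht5/test_tokenization_speecht5.py -k tokenizer
| 553 | 0 |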
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
    pass
| 599 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Read an audio payload through ffmpeg, decoding it to mono float32 PCM."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
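
if __name__ == "__main__":
    # Minimal self-check for ffmpeg_read (a sketch; requires the ffmpeg binary on
    # PATH). One second of a 440 Hz sine wave is synthesized as an in-memory WAV
    # payload and decoded back to float32 PCM.
    import io
    import wave

    _sr = 16_000
    _t = np.arange(_sr) / _sr
    _pcm = (np.sin(2 * np.pi * 440 * _t) * 32_767).astype(np.int16)
    _buf = io.BytesIO()
    with wave.open(_buf, "wb") as _w:
        _w.setnchannels(1)
        _w.setsampwidth(2)
        _w.setframerate(_sr)
        _w.writeframes(_pcm.tobytes())

    _audio = ffmpeg_read(_buf.getvalue(), sampling_rate=_sr)
    print(_audio.dtype, _audio.shape)  # expected: float32 (~16000,)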
def lowerCAmelCase__ ( a_ : int , a_ : float , a_ : str = "f32le" , ) -> List[str]:
UpperCAmelCase__ : str = f"""{sampling_rate}"""
UpperCAmelCase__ : Tuple = '''1'''
if format_for_conversion == "s16le":
UpperCAmelCase__ : str = 2
elif format_for_conversion == "f32le":
UpperCAmelCase__ : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
UpperCAmelCase__ : Dict = platform.system()
if system == "Linux":
UpperCAmelCase__ : Union[str, Any] = '''alsa'''
UpperCAmelCase__ : List[Any] = '''default'''
elif system == "Darwin":
UpperCAmelCase__ : List[str] = '''avfoundation'''
UpperCAmelCase__ : List[Any] = ''':0'''
elif system == "Windows":
UpperCAmelCase__ : Optional[int] = '''dshow'''
UpperCAmelCase__ : Any = '''default'''
UpperCAmelCase__ : str = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
UpperCAmelCase__ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase__ : Dict = _ffmpeg_stream(a_ , a_ )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Like `ffmpeg_microphone`, but yields dict chunks with stride metadata for overlapping inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len` with `stride` bytes of overlap on each
    side. When `stream` is True, partial results are yielded before a full `chunk_len` is available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
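
if __name__ == "__main__":
    # A tiny walk-through of chunk_bytes_iter (12 input bytes, 6-byte chunks,
    # 1 byte of left/right stride). Expected output:
    #   b'abcdef' (0, 1)
    #   b'efghij' (1, 1)
    #   b'ijkl' (1, 0)
    for _chunk in chunk_bytes_iter(iter([b"abcdefgh", b"ijkl"]), chunk_len=6, stride=(1, 1)):
        print(_chunk["raw"], _chunk["stride"])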
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of raw data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 599 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
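
# Usage sketch (hedged: BertConfig is used purely as an example encoder/decoder pair):
#
#   from transformers import BertConfig, EncoderDecoderConfig
#
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True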
| 446 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (numpy or torch) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
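
# Sanity check for slerp (a sketch, numpy inputs): the endpoints are recovered
# at t=0 and t=1, and interpolating between orthogonal unit vectors keeps unit norm:
#
#   v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#   assert np.allclose(slerp(0.0, v0, v1), v0)
#   assert np.allclose(slerp(1.0, v0, v1), v1)
#   assert np.isclose(np.linalg.norm(slerp(0.5, v0, v1)), 1.0)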
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)

        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())

        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
        uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(self.scheduler.timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        text_embeddings_clip,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None) | 687 | 0 |
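# The guidance update in the loop above reduces to one tensor expression; a minimal
# standalone sketch of classifier-free guidance (hypothetical helper mirroring the
# pipeline's variable names, not part of the original file):
#
#   def apply_cfg(noise_pred_uncond, noise_pred_text, guidance_scale):
#       return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#
# With guidance_scale == 1.0 this returns the text-conditioned prediction unchanged;
# larger values extrapolate away from the unconditional prediction.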
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('patch')
    patch_size = int(model_name[start_idx + len('patch') : start_idx + len('patch') + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
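# Illustrative check of the patch-size parsing above (hypothetical call, not part of
# the original script):
#
#   config = get_xclip_config("xclip-base-patch32", num_frames=8)
#   assert config.vision_config.patch_size == 32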
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('token_embedding.weight', 'text_model.embeddings.token_embedding.weight')
    if name == "positional_embedding":
        name = name.replace('positional_embedding', 'text_model.embeddings.position_embedding.weight')
    if "ln_1" in name:
        name = name.replace('ln_1', 'layer_norm1')
    if "ln_2" in name:
        name = name.replace('ln_2', 'layer_norm2')
    if "c_fc" in name:
        name = name.replace('c_fc', 'fc1')
    if "c_proj" in name:
        name = name.replace('c_proj', 'fc2')
    if name.startswith('transformer.resblocks'):
        name = name.replace('transformer.resblocks', 'text_model.encoder.layers')
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('attn.out_proj', 'self_attn.out_proj')
    if "ln_final" in name:
        name = name.replace('ln_final', 'text_model.final_layer_norm')
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('visual.class_embedding', 'vision_model.embeddings.class_embedding')
    if name == "visual.positional_embedding":
        name = name.replace('visual.positional_embedding', 'vision_model.embeddings.position_embedding.weight')
    if name.startswith('visual.transformer.resblocks'):
        name = name.replace('visual.transformer.resblocks', 'vision_model.encoder.layers')
    if "visual.conv1" in name:
        name = name.replace('visual.conv1', 'vision_model.embeddings.patch_embedding')
    if "visual.ln_pre" in name:
        name = name.replace('visual.ln_pre', 'vision_model.pre_layernorm')
    if "visual.ln_post" in name:
        name = name.replace('visual.ln_post', 'vision_model.post_layernorm')
    if "visual.proj" in name:
        name = name.replace('visual.proj', 'visual_projection.weight')
    if "text_projection" in name:
        name = name.replace('text_projection', 'text_projection.weight')
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('prompts_visual_proj', 'prompts_visual_projection')
    if "prompts_visual_ln" in name:
        name = name.replace('prompts_visual_ln', 'prompts_visual_layernorm')
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('positional', 'position')
    if name.startswith('mit.resblocks'):
        name = name.replace('mit.resblocks', 'mit.encoder.layers')
    # prompts generator
    if name.startswith('prompts_generator.norm'):
        name = name.replace('prompts_generator.norm', 'prompts_generator.layernorm')
    return name
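# Example of the mapping performed by rename_key (illustrative):
#
#   rename_key('visual.transformer.resblocks.0.ln_1.weight')
#   # -> 'vision_model.encoder.layers.0.layer_norm1.weight'
#   rename_key('transformer.resblocks.3.mlp.c_fc.bias')
#   # -> 'text_model.encoder.layers.3.mlp.fc1.bias'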
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split('.')
            if key.startswith('visual'):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight'] = val[:dim, :]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight'] = val[dim : dim * 2, :]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight'] = val[-dim:, :]
                    else:
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias'] = val[:dim]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias'] = val[dim : dim * 2]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias'] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim : dim * 2, :]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
                    else:
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                        orig_state_dict[f'vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
            elif key.startswith('mit'):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f'mit.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                    orig_state_dict[f'mit.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'mit.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
                else:
                    orig_state_dict[f'mit.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                    orig_state_dict[f'mit.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'mit.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                    orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
                else:
                    orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                    orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
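# The slicing in convert_state_dict is the standard split of a fused attention
# in_proj matrix (shape (3 * dim, dim)) into separate q/k/v projections; a minimal
# sketch of the same arithmetic (illustrative helper, not used by the script):
def _split_in_proj(val, dim):
    # the fused tensor stacks q, k, v along dim 0
    return val[:dim], val[dim : dim * 2], val[-dim:]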
def prepare_video(num_frames):
    if num_frames == 8:
        filename = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        filename = 'eating_spaghetti.npy'
    elif num_frames == 32:
        filename = 'eating_spaghetti_32_frames.npy'
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename=filename, repo_type='dataset'
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = 'pytorch_model.bin'
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location='cpu')['model']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)['model']

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')
    tokenizer = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32')
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=['playing sports', 'eating spaghetti', 'go shopping'], videos=video, return_tensors='pt', padding=True
    )

    print('Shape of pixel values:', inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print('Probs:', probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f'Model name {model_name} not supported')
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model, processor and slow tokenizer files to the hub...')
        model.push_to_hub(model_name, organization='nielsr')
        processor.push_to_hub(model_name, organization='nielsr')
        slow_tokenizer.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 509 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # tree nodes followed by the leaves
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
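# Layout note: the tree is stored flat in `st` (length 2 * N), with the leaves at
# indices N..2N-1 and internal node p aggregating its children 2p and 2p + 1.
# Micro example (illustrative): SegmentTree([5, 3, 8], min) builds
# st = [None, 3, 3, 5, 3, 8], so query(1, 2) == 3 (the min of [3, 8]).
# Build is O(N); update and query are O(log N).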
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """
        Test all possible segments against a brute-force reduce over the array.
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
| 509 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (no submodules) plus conv / batch-norm layers
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
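# How Tracker works: __call__ registers a forward hook on every submodule, runs one
# forward pass, and records the leaf operations in execution order. The same idea in
# a minimal free-standing form (illustrative sketch, not used below):
#
#   leaves, handles = [], []
#   for m in model.modules():
#       if len(list(m.modules())) == 1:  # leaf module
#           handles.append(m.register_forward_hook(lambda mod, inp, out: leaves.append(mod)))
#   model(torch.randn(1, 3, 224, 224))
#   for h in handles:
#       h.remove()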
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.'
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}')
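# Design note: ModuleTransfer assumes both networks execute the same learnable
# operations in the same order during a forward pass, so it can pair them
# positionally and copy state dicts one by one, avoiding any name mapping between
# the two checkpoints.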
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return the correct hugging face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}')
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f'Pushed {name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 359 | """simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 359 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
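# A typical invocation, for orientation only (hypothetical paths and values; the
# accepted flags come from the three dataclasses parsed in main() below):
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact-output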
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
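    # For reference, a raw `table_text` value such as "col1#col2\nA#B\nC#D" becomes a
    # 2x2 pandas DataFrame with columns ["col1", "col2"] before being passed to the
    # TAPEX tokenizer together with the statement (illustrative value, not taken from
    # the dataset).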
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
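

# Usage sketch (illustrative file path; assumes the installed `datasets` package, since
# this module only runs inside it). `load_dataset("text", ...)` is the public entry
# point backed by the Text builder wired up above.
def _example_read_text_file():
    import os
    import tempfile

    from datasets import load_dataset

    path = os.path.join(tempfile.mkdtemp(), "corpus.txt")
    with open(path, "w") as f:
        f.write("first line\nsecond line\n")
    ds = load_dataset("text", data_files=path, split="train")
    return ds[0]  # {'text': 'first line'}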
| 15 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
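

# Illustration (not part of the original script): what StratifiedKFold yields on toy
# labels -- (train_idxs, valid_idxs) pairs with the class balance preserved per fold.
def _demo_stratified_split():
    labels = np.array([0, 0, 0, 1, 1, 1])
    splits = StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels)
    return [(train_idxs.tolist(), valid_idxs.tolist()) for train_idxs, valid_idxs in splits]
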
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Gets a set of train, valid, and test dataloaders for a particular fold.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
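

# Illustration (not part of the original script): the fold averaging at the end of
# `training_function` is soft voting -- per-fold logits are summed before the argmax.
def _demo_soft_voting():
    fold_logits = [
        torch.tensor([[2.0, 1.0], [0.2, 0.8]]),  # fake test logits from fold 0
        torch.tensor([[1.5, 1.1], [0.3, 0.9]]),  # fake test logits from fold 1
    ]
    return torch.stack(fold_logits, dim=0).sum(dim=0).argmax(dim=-1)  # tensor([0, 1])
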
if __name__ == "__main__":
main()
| 22 |
'''simple docstring'''
def upper(word: str) -> str:
    """
    Convert the entire string to ASCII uppercase letters.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello_World!")
    'HELLO_WORLD!'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 533 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
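

# Illustration (not part of the original script): for a small input the two halves
# combine into a diamond -- rows of 1, 2, 3, then 3, 2, 1 stars when n = 3.
def _demo_diamond():
    pretty_print(3)
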
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
__lowerCAmelCase : Dict = 1
while K:
__lowerCAmelCase : int = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
__lowerCAmelCase : int = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 164 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
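

# Usage sketch (illustrative flags; assumes a transformers install): deprecated
# negative flags are flipped into their positive counterparts by `__init__` above.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   args.cuda  # -> False, because `no_cuda=True` was translated to `cuda=False`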
| 164 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
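

# The round trip under test, as a standalone sketch (model id illustrative; assumes
# `optimum` is installed, which provides the BetterTransformer backend):
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # swap attention blocks for fused kernels
#   model = model.reverse_bettertransformer()  # restore the canonical layout before saving
#   model.save_pretrained("/tmp/t5-roundtrip")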
| 614 | '''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
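

# Worked example (illustration): with the default scale_factor=8 the requested size is
# rounded up to a multiple of 64 and divided by 8 -- get_new_h_w(768, 768) returns
# (96, 96), while a non-multiple such as 700 rounds up: ceil(700 / 64) * 8 = 88.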
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_=None ,) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : int = len(lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else 1
# get prompt text embeddings
UpperCAmelCase__ : Tuple = self.tokenizer(
lowerCamelCase_ ,padding='''max_length''' ,truncation=lowerCamelCase_ ,max_length=77 ,return_attention_mask=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_tensors='''pt''' ,)
UpperCAmelCase__ : List[Any] = text_inputs.input_ids
UpperCAmelCase__ : List[str] = self.tokenizer(lowerCamelCase_ ,padding='''longest''' ,return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : str = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase__ : Tuple = text_input_ids.to(lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = text_inputs.attention_mask.to(lowerCamelCase_ )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.text_encoder(
input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
UpperCAmelCase__ : Tuple = prompt_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : List[str] = text_encoder_hidden_states.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : str = text_mask.repeat_interleave(lowerCamelCase_ ,dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ : List[str]
if negative_prompt is None:
UpperCAmelCase__ : List[Any] = [''''''] * batch_size
elif type(lowerCamelCase_ ) is not type(lowerCamelCase_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_ )} !='''
f''' {type(lowerCamelCase_ )}.''' )
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : Dict = [negative_prompt]
elif batch_size != len(lowerCamelCase_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase__ : Dict = negative_prompt
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(
lowerCamelCase_ ,padding='''max_length''' ,max_length=77 ,truncation=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_tensors='''pt''' ,)
UpperCAmelCase__ : Optional[Any] = uncond_input.input_ids.to(lowerCamelCase_ )
UpperCAmelCase__ : List[str] = uncond_input.attention_mask.to(lowerCamelCase_ )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.text_encoder(
input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ : Optional[int] = negative_prompt_embeds.shape[1]
UpperCAmelCase__ : List[Any] = negative_prompt_embeds.repeat(1 ,lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,lowerCamelCase_ )
UpperCAmelCase__ : int = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase__ : int = uncond_text_encoder_hidden_states.repeat(1 ,lowerCamelCase_ ,1 )
UpperCAmelCase__ : List[str] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,lowerCamelCase_ ,-1 )
UpperCAmelCase__ : Union[str, Any] = uncond_text_mask.repeat_interleave(lowerCamelCase_ ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase__ : str = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase__ : List[str] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self ,lowerCamelCase_=0 ) -> List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase__ : Dict = torch.device(f'''cuda:{gpu_id}''' )
UpperCAmelCase__ : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ ,lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_=0 ) -> Union[str, Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' ,'''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase__ : str = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' ,silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ : Optional[Any] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = cpu_offload_with_hook(lowerCamelCase_ ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ )
if self.safety_checker is not None:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cpu_offload_with_hook(self.safety_checker ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
UpperCAmelCase__ : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not hasattr(self.unet ,'''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ ,'''_hf_hook''' )
and hasattr(module._hf_hook ,'''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = 512 ,lowerCamelCase_ = 512 ,lowerCamelCase_ = 100 ,lowerCamelCase_ = 4.0 ,lowerCamelCase_ = 1 ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = "pil" ,lowerCamelCase_ = True ,) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = 1
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = len(lowerCamelCase_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}''' )
UpperCAmelCase__ : Union[str, Any] = self._execution_device
UpperCAmelCase__ : Optional[int] = batch_size * num_images_per_prompt
UpperCAmelCase__ : Dict = guidance_scale > 1.0
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._encode_prompt(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = torch.cat(lowerCamelCase_ ,dim=0 )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : int = torch.cat(lowerCamelCase_ ,dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ : Optional[Any] = image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : Optional[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=lowerCamelCase_ )
self.scheduler.set_timesteps(lowerCamelCase_ ,device=lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = self.scheduler.timesteps
UpperCAmelCase__ : List[Any] = self.unet.config.in_channels
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = get_new_h_w(lowerCamelCase_ ,lowerCamelCase_ ,self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ : Tuple = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
UpperCAmelCase__ : int = self.unet(
sample=lowerCamelCase_ ,timestep=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,added_cond_kwargs=lowerCamelCase_ ,return_dict=lowerCamelCase_ ,)[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = noise_pred.split(latents.shape[1] ,dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ : Any = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = variance_pred.chunk(2 )
UpperCAmelCase__ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ : List[str] = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : Tuple = self.scheduler.step(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ,).prev_sample
# post-processing
UpperCAmelCase__ : Optional[int] = self.movq.decode(lowerCamelCase_ ,force_not_quantize=lowerCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
UpperCAmelCase__ : Optional[Any] = image * 0.5 + 0.5
UpperCAmelCase__ : Any = image.clamp(0 ,1 )
UpperCAmelCase__ : str = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ : List[Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
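

# Classifier-free guidance in isolation (illustration with toy tensors): the denoising
# loop above blends unconditional and text-conditioned predictions with this update.
def _demo_classifier_free_guidance(guidance_scale=4.0):
    noise_pred_uncond = torch.tensor([0.0])
    noise_pred_text = torch.tensor([1.0])
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)  # tensor([4.])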
| 614 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
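

# Usage sketch (illustrative values): configs round-trip through JSON on disk.
def _example_van_config_roundtrip():
    import tempfile

    config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
    with tempfile.TemporaryDirectory() as tmp:
        config.save_pretrained(tmp)  # writes config.json
        reloaded = VanConfig.from_pretrained(tmp)
    return reloaded.hidden_sizes  # [32, 64, 160, 256]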
| 707 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then
    # check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
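

# Usage sketch (assumes `bitsandbytes` and `accelerate` are installed; model id is
# illustrative): loading with `load_in_8bit=True` routes through the machinery above,
# swapping eligible nn.Linear/Conv1D modules for bnb quantized layers.
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("gpt2", load_in_8bit=True, device_map="auto")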
| 203 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
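
# Worked example (illustration): for pattern "AB" in "ABAABA" the windows at 0 and 3
# match, so `positions` prints as [0, 3]. At window 1 the rightmost mismatch is the
# 'A' at text index 2, which last occurs at pattern index 0, giving a shift of
# 2 - 0 = 2 -- although, as the lgtm comment above notes, reassigning the `for` loop
# variable does not actually skip iterations in Python.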
| 101 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
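

# Usage sketch (model id illustrative; `waveform` stands for a 16 kHz float array):
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript", return_tensors="pt")
#   inputs["labels"]  # tokenized transcript ids attached by __call__ above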
| 101 | 1 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
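

# Worked example (illustration, not in the original): "SKY" scores
# 19 + 11 + 25 = 55, the 10th triangular number, so it counts as a triangle word.
def _demo_word_value():
    return sum(ord(ch) - 64 for ch in "SKY")  # 55, and 55 in TRIANGULAR_NUMBERS
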
if __name__ == "__main__":
print(solution())
| 715 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
__lowerCAmelCase = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
__lowerCAmelCase = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
__lowerCAmelCase = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
__lowerCAmelCase = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
__lowerCAmelCase = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
__lowerCAmelCase = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
__lowerCAmelCase = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # NOTE: the destination key names below are reconstructed to follow the
            # renaming scheme established by `rename_key` above.
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
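

# The qkv split in isolation (illustration with toy shapes): a fused (3*dim, dim)
# projection is sliced row-wise into the three (dim, dim) q/k/v matrices, exactly the
# slicing used in `convert_state_dict` above.
def _demo_qkv_split(dim=4):
    qkv = torch.randn(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v
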
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default="groupvit-gcc-yfcc",
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 102 | 0 |
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'{key:21} {value}')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''') | 287 |
def solution(n: int = 2000000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
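

# Small-n sanity check (illustration, not in the original): the primes below 10 are
# 2, 3, 5 and 7, so `solution(10)` returns 2 + 3 + 5 + 7 = 17.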
if __name__ == "__main__":
print(F'{solution() = }') | 287 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
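

# Illustration (not part of the test file): the property asserted by
# `_check_zero_mean_unit_variance` below is plain standardization of the waveform.
def _demo_standardization():
    x = np.asarray(floats_list((1, 1000))[0]) * 3.0 + 5.0
    normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    return normed.mean(), normed.var()  # approximately (0.0, 1.0)
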
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = WavaVecaFeatureExtractor
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = WavaVecaFeatureExtractionTester(self )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 541 |
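What the normalization assertions in the tests above check, reduced to a few lines of NumPy; the 1e-7 epsilon is an assumption mirroring the feature extractor's zero-mean unit-variance normalization.

import numpy as np

raw = np.random.rand(800).astype(np.float32)
normed = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3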
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 541 | 1 |
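From the consumer side the lazy indirection above is invisible: a normal import goes through `_LazyModule.__getattr__` and loads the tokenization module on first access. A sketch, assuming the usual transformers package layout:

from transformers.models.byt5 import ByT5Tokenizer  # resolved lazily on attribute access

tokenizer = ByT5Tokenizer()  # byte-level tokenizer, needs no vocab file
print(tokenizer("hi").input_ids)  # raw bytes shifted by the special-token offset, plus </s>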
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to None and empty the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    """Return True if `exception` looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function`, halving the injected batch size on OOM."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 21 |
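A minimal sketch of how the decorator above is used; `train_epoch` and the simulated OOM are hypothetical and not part of the original file:

attempts = []

@find_executable_batch_size(starting_batch_size=128)
def train_epoch(batch_size):
    attempts.append(batch_size)
    if batch_size > 32:  # pretend anything above 32 does not fit in memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

final_batch_size = train_epoch()  # called without a batch size; the decorator injects it
print(attempts, final_batch_size)  # [128, 64, 32] 32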
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 695 | 0 |
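Illustrative use of the config above; the `attribute_map` makes the generic Transformer attribute names alias the GPT-2 style fields:

config = GPTBigCodeConfig(n_embd=512, n_layer=6, n_head=8)
assert config.hidden_size == config.n_embd == 512
assert config.num_hidden_layers == config.n_layer == 6
assert config.model_type == "gpt_bigcode"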
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a Metaseq checkpoint and remap its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load the remapped state dict into an OPTModel and save it in HF format."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 717 |
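The fused-QKV handling above is a plain row-wise split; a standalone sketch with a tiny random weight (illustrative sizes, not a real Metaseq checkpoint):

import torch

hidden = 4
fused = torch.randn(3 * hidden, hidden)  # three (hidden, hidden) blocks stacked on dim 0
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)  # K, V, Q order, per the comment above
assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([k, v, q], dim=0), fused)  # the split is lossless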
INSTALL_CONTENT = """
# Installing Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 527 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 372 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 372 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline predicting bounding boxes and their classes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict with those keys."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 78 |
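A hedged usage sketch for the pipeline above through the public `pipeline` factory; the model id and the printed output shape are illustrative:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
# -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]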
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize this instance, converting any `GenerationConfig` values to plain dicts."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 78 | 1 |
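Typical construction, with illustrative values and a placeholder `output_dir`:

training_args = Seq2SeqTrainingArguments(
    output_dir="./out",
    predict_with_generate=True,  # decode with generate() during evaluation
    generation_max_length=64,
    generation_num_beams=4,
)
assert training_args.to_dict()["generation_max_length"] == 64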
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 22 |
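The greedy argument relies on activities being sorted by finish time; the variant below (not in the original) sorts first and returns indices instead of printing, which is easier to test:

def select_activities(start, finish):
    order = sorted(range(len(finish)), key=lambda idx: finish[idx])
    selected = [order[0]]
    for idx in order[1:]:
        if start[idx] >= finish[selected[-1]]:
            selected.append(idx)
    return selected

print(select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))  # [0, 1, 3, 4]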
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)

    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main() | 322 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed base distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed base distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the constructed distributions."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the support of the distribution, usable as a padding value."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return a module mapping features to the (constrained) distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Map unbounded tensors into the parameter domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity map: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # The negative binomial produces integers, so instead of an affine
    # transform we rescale the parameters directly.
    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits)) | 706 |
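A small end-to-end sketch of the classes above with illustrative sizes: project features to Student's t parameters, then rescale the distribution through `distribution(...)`:

import torch

output = StudentTOutput(dim=1)
proj = output.get_parameter_projection(in_features=16)
features = torch.randn(8, 16)                    # a batch of 8 feature vectors
distr_args = proj(features)                      # (df, loc, scale), each of shape [8]
distr = output.distribution(distr_args, loc=torch.zeros(8), scale=2.0 * torch.ones(8))
sample = distr.sample()                          # shape [8]
log_prob = distr.log_prob(sample)                # shape [8]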
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setUp(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args) | 113 | 0
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image, optionally keeping the aspect ratio and snapping to a multiple."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation | 149 |
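The keep_aspect_ratio and ensure_multiple_of logic above is easiest to see on a concrete input; a minimal sketch using the function defined in this file:

import numpy as np

# 480x640 image (channels-first); target 384x384 with the aspect ratio kept and
# both sides snapped to multiples of 32. scale_height = 0.8 is closer to 1 than
# scale_width = 0.6, so both sides are scaled by 0.8 ("fit height") -> (384, 512).
image = np.zeros((3, 480, 640), dtype=np.uint8)
print(get_resize_output_image_size(image, (384, 384), keep_aspect_ratio=True, multiple=32))  # (384, 512)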
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids) | 149 | 1
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (MXNet/GluonNLP) to a Hugging Face BertForMaskedLM."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param_name):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param_name])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param_name} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path) | 664 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as many times as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)

    return answer
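

def _demo_minimum_change() -> None:
    # Illustrative sanity check (not part of the original script; the
    # denominations below are an arbitrary example set):
    # 93 = 50 + 20 + 20 + 2 + 1 with the greedy strategy.
    assert find_minimum_change([1, 2, 5, 10, 20, 50], "93") == [50, 20, 20, 2, 1]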
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
print(answer[i], end=" ") | 664 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
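

# Illustrative companion check (not in the original test module): with the
# library default of IN_MEMORY_MAX_SIZE == 0, in-memory loading is disabled
# regardless of the dataset size, so `is_small_dataset` is always False.
def _demo_default_disables_in_memory():
    if datasets.config.IN_MEMORY_MAX_SIZE == 0:
        assert not is_small_dataset(400 * 2**20)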
| 475 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encode a bytes-like object into base64-encoded bytes."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode a base64-encoded string (or ASCII bytes) back into bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
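

def _demo_base64_roundtrip() -> None:
    # Illustrative round-trip check (not part of the original module):
    # encoding then decoding must recover the input exactly.
    assert base64_encode(b"Hello") == b"SGVsbG8="
    assert base64_decode("SGVsbG8=") == b"Hello"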
if __name__ == "__main__":
import doctest
doctest.testmod() | 448 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create a vector pointing from ``end_point1`` to ``end_point2``."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Return the cross product of two 3d vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether a vector is the zero vector up to ``accuracy`` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Check whether three points are collinear (AB x AC is the zero vector)."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
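

def _demo_collinear() -> None:
    # Illustrative check (not part of the original module): three points on
    # the x-axis are collinear, since AB x AC is the zero vector.
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0))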
| 377 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from sequences by adding the [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs for a sequence pair: 0s for the first sequence, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
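
# Illustrative usage (hypothetical session; requires downloading the
# "bert-base-uncased" files from the Hugging Face Hub):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     tokenizer("Hello world")["input_ids"]  # -> [101, 7592, 2088, 102]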
| 183 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class to define a graph vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id : edge weight}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm: return the MST edges as (child, parent) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-based Prim's algorithm: yield the MST edges as (child, parent) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
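

def _demo_prim() -> None:
    # Illustrative smoke test (not part of the original module): a weighted
    # triangle on vertices 1, 2, 3; both variants report MST edges as
    # (child, parent) pairs.
    demo_graph = [Vertex(n) for n in range(3)]
    connect(demo_graph, 1, 2, 1)
    connect(demo_graph, 2, 3, 2)
    connect(demo_graph, 1, 3, 3)
    assert prim(demo_graph, demo_graph[0]) == [(2, 1), (3, 2)]
    assert list(prim_heap(demo_graph, demo_graph[0])) == [(2, 1), (3, 2)]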
def test_vector() -> None:
    """Placeholder kept from the source; see `_demo_prim` above for example usage."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_tracking(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
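

# Illustrative usage of the decorators above (hypothetical test case):
#
#     class MyTests(unittest.TestCase):
#         @require_cuda
#         def test_runs_on_gpu(self):
#             ...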
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class and wipes
    its contents on setup.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create a temporary directory for the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove `cls.tmpdir` after the tests have run."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state singletons between tests."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase class for dynamically registering mocks that are torn down after each test."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """Registers `mocks`, starts them, and ensures they are stopped on teardown."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
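

# Illustrative usage (assumes the script is launched with `accelerate launch`
# across several processes): every rank passes its local tensor, and the helper
# gathers them to verify all ranks hold identical values.
#
#     same = are_the_same_tensors(torch.tensor([1.0, 2.0]))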
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and returns stdout if requested. Properly captures and raises a
    `SubprocessCallException` if an error occurred while running the command.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
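

def _demo_run_command() -> None:
    # Illustrative check (assumes a POSIX `echo` executable on PATH):
    assert run_command(["echo", "hello"], return_stdout=True) == "hello\n"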
| 184 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"

        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 184 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
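

# Illustrative behaviour (hypothetical session): instantiating the deprecated
# class still works but emits the FutureWarning above.
#
#     feature_extractor = CLIPFeatureExtractor()  # warns; prefer CLIPImageProcessor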
| 37 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 188 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """Corpus-level Google BLEU (GLEU), computed with nltk's gleu_score."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
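
# --- Illustrative sketch (hypothetical helpers, not part of the metric API) --
# The description above defines GLEU as min(n-gram precision, recall) over all
# 1..4-grams. Below is a minimal single-sentence re-implementation of that
# definition; the metric class itself delegates the real computation to nltk's
# corpus_gleu, which also handles multiple references per hypothesis.
from collections import Counter


def _ngram_counts(tokens: List[str], n: int) -> Counter:
    return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))


def sentence_gleu_sketch(hypothesis: List[str], reference: List[str], min_len: int = 1, max_len: int = 4) -> float:
    matches = hyp_total = ref_total = 0
    for n in range(min_len, max_len + 1):
        hyp_counts, ref_counts = _ngram_counts(hypothesis, n), _ngram_counts(reference, n)
        matches += sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
        hyp_total += sum(hyp_counts.values())
        ref_total += sum(ref_counts.values())
    precision = matches / hyp_total if hyp_total else 0.0
    recall = matches / ref_total if ref_total else 0.0
    return min(precision, recall)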
| 52 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
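
# Minimal sketch of how the lazy pattern above behaves (a simplified
# assumption about transformers' _LazyModule, not its actual implementation):
# attribute access such as `transformers.onnx.OnnxConfig` is routed through
# _LazyModule.__getattr__, which roughly does
#
#   submodule = self._class_to_module[name]            # "OnnxConfig" -> "config"
#   module = importlib.import_module("." + submodule, self.__name__)
#   return getattr(module, name)
#
# so heavy submodules are imported only on first use, while the TYPE_CHECKING
# branch gives static type checkers the real symbols.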
| 52 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer  # location of the exit layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
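
# Note on `entropy` (imported above from modeling_highway_bert): it scores how
# confident a highway classifier is, and DeeBERT-style inference exits at the
# first highway whose entropy drops below a threshold. A mathematically
# equivalent sketch of that helper (an assumption; the canonical definition
# lives in modeling_highway_bert):
#
#   def entropy(logits):
#       log_p = torch.log_softmax(logits, dim=-1)
#       return -(log_p.exp() * log_p).sum(dim=-1)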
| 472 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
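
# Note: the "*" in MAPPING values is a per-layer wildcard; it is replaced with
# the fairseq layer index extracted from each checkpoint key (see
# recursively_load_weights below), so one entry covers all encoder layers.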
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
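
# Illustrative invocation (hypothetical script name and paths, shown only as a
# usage sketch for the argparse interface defined above):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf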
| 472 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the height/width the image processor is expected to produce
        # for the given inputs, mirroring its shortest-edge resizing logic.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 721 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Return the binary representation of 'number' logically left shifted
    'shift_amount' times, i.e. (number << shift_amount).

    >>> logical_left_shift(1, 1)
    '0b10'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """
    Return the binary representation of 'number' logically right shifted
    'shift_amount' times, i.e. (number >>> shift_amount).

    >>> logical_right_shift(1024, 2)
    '0b100000000'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """
    Return the binary representation of 'number' arithmetically right shifted
    'shift_amount' times, i.e. (number >> shift_amount), sign-extending.

    >>> arithmetic_right_shift(17, 2)
    '0b000100'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 372 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # seed the generator so the sampled waveform is reproducible
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 688 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 688 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)

    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size

    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
from __future__ import annotations
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """
    Return a decimal number as a (numerator, denominator) pair in lowest terms.

    >>> decimal_to_fraction(1.5)
    (3, 2)
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        # Euclid's algorithm: reduce the fraction by the gcd of both terms
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 358 |
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """
    Use Pollard's Rho algorithm to return a nontrivial factor of ``num``.
    The returned factor may be composite and require further factorization.

    >>> pollard_rho(187)
    11
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 358 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class _lowercase ( __UpperCAmelCase ):
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 707 |
"""simple docstring"""
from pathlib import Path
import fire
def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
__magic_name__ = Path(__UpperCamelCase )
__magic_name__ = Path(__UpperCamelCase )
dest_dir.mkdir(exist_ok=__UpperCamelCase )
for path in src_dir.iterdir():
__magic_name__ = [x.rstrip() for x in list(path.open().readlines() )][:n]
__magic_name__ = dest_dir.joinpath(path.name )
print(__UpperCamelCase )
dest_path.open('''w''' ).write('''\n'''.join(__UpperCamelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
| 190 | 0 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
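
# Illustrative usage (hypothetical values, not taken from this file):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   assert config.context_length == 24           # defaults to prediction_length
#   assert config.hidden_size == config.d_model  # resolved via attribute_map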
| 347 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Produce an ordered list of slices covering exactly start..end (inclusive, row-major)."""

    # start_edges/end_edges indicate whether, from a given dimension on, the
    # start/end index sits at the top/bottom edge of the corresponding subtree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    # Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end],
    # but without reshape(), which can be problematic for certain backends.
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Implements the "chunking" procedure: the batch dimensions of all inputs
    are flattened, ``layer`` is run on successive slices of size
    ``chunk_size``, and the outputs are stitched back together. This trades
    peak memory for extra invocations of ``layer``.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs the model has been run on
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # Binary search for the largest chunk size that does not OOM
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First call: there is nothing cached to compare against, so we must tune
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
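# A minimal sketch of how these utilities might be driven (illustrative only;
# the layer, shapes and chunk size below are made up for the example):
#
#     layer = lambda x: {"out": x * 2}             # any callable taking keyword tensors
#     inputs = {"x": torch.randn(4, 8, 16)}        # two batch dims: (4, 8)
#     out = chunk_layer(layer, inputs, chunk_size=8, no_batch_dims=2)
#     # out["out"] has shape (4, 8, 16), computed 8 flattened rows at a time;
#     # a ChunkSizeTuner instance can pick chunk_size automatically via
#     # tune_chunk_size(representative_fn, args, min_chunk_size).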
# ============================ next code sample ============================
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``, or
    ``None`` if every attempt fails."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
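# Illustrative sanity check (not part of the original script): 8051 = 83 * 97,
# so a call like the one below usually recovers one of those factors, though
# the exact result depends on the seed/step defaults and can be None on
# unlucky runs:
#
#     pollard_rho(8051)   # -> 83 or 97 (typically)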
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
# ============================ next code sample ============================
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
return image_inputs
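# Note (added for clarity, not part of the original test file): depending on the
# flags, ``prepare_inputs`` above returns PIL images (the default), numpy arrays
# (``numpify=True``) or torch tensors (``torchify=True``), matching the three
# ``test_call_*`` cases below.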
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
# ============================ next code sample ============================
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
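# Usage note (added for illustration, not part of the original test file):
# outside of tests the same pipeline is driven as, e.g.:
#
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "plane", "remote"])
#
# which returns a list of {"score", "label"} dicts sorted by score.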
# ============================ next code sample ============================
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
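# Example invocation (assuming the package's ``diffusers-cli`` entry point maps
# to ``main`` above):
#
#     $ diffusers-cli env
#
# The ``env`` subcommand is dispatched to ``EnvironmentCommand`` via the
# ``func`` attribute set by the subparser registered above.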
# ============================ next code sample ============================
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        embed_dim=3,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers=7,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        encoder_stride=2,
        num_attention_outputs=1,
        dim=128,
        depths=[2, 2, 2, 2],
        resolution=2,
        mlp_expansion_ratio=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
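# A minimal sketch of how the tester above is typically driven (illustrative;
# not in the original file):
#
#     tester = TFEfficientFormerModelTester(parent=self)   # inside a TestCase
#     config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#     model = TFEfficientFormerModel(config)
#     outputs = model(**inputs_dict)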
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# ============================ next code sample ============================
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
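# A minimal usage sketch (added for illustration; the sizes are made up):
#
#     model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=4, in_channels=32)
#     hidden = torch.randn(2 * 4, 32, 16, 16)     # (batch * frames, channels, height, width)
#     out = model(hidden, num_frames=4).sample    # same shape as `hidden`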
# ============================ next code sample ============================
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
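# The three-stage DiffEdit flow exercised above and in the integration tests
# below (summarized here for readability; arguments are illustrative):
#
#     mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#     latents = pipe.invert(prompt=src, image=img).latents
#     edited = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]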
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

# ============================ next code sample ============================
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
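# Illustrative input/output for read_txt_into_dict (added for clarity; the file
# name is hypothetical): given a "dict.txt" with one token per line, e.g.
#
#     <pad>
#     <s>
#     hello
#
# the function returns {0: "<pad>", 1: "<s>", 2: "hello"}, i.e. a
# line-number -> first-token vocabulary mapping.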
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
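# Illustration of the "*" wildcard substitution performed in load_wavaveca_layer
# above. A self-contained sketch with made-up checkpoint key names, not entries
# from a real fairseq state dict:
def _demo_wildcard_substitution():
    name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"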
| 100 | 0 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 702 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
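# A minimal concrete subcommand, sketched for illustration only -- the "hello"
# command, its parser wiring, and its behavior are invented here, not part of
# the original CLI:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is assumed to be the result of ArgumentParser.add_subparsers().
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")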
| 223 | 0 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 589 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
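# A usage sketch (the script name and paths below are placeholders, not real files):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/funnel/model.ckpt \
#       --config_file /path/to/funnel/config.json \
#       --pytorch_dump_path ./funnel-converted.bin \
#       --base_model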
| 320 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __A ( self: Dict ) -> Optional[int]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def __A ( self: Any ) -> List[str]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: str ) -> int:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: int ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = floats_list((3, 10_00) )
_A = feature_extractor(__A , return_tensors='''np''' )
_A = processor(__A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Optional[int] ) -> Optional[int]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = '''This is a test string'''
_A = processor(text=__A )
_A = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def __A ( self: List[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(__A )
_A = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: List[Any] , __A: Any ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_A = processor.batch_decode(__A , __A )
_A = list(__A )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(__A , __A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def __A ( self: Tuple ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = decoded_processor_out.text
_A = list(__A )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) )
def __A ( self: Dict ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
_A = decoded_processor_out.text
_A = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def __A ( self: Union[str, Any] ) -> int:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def __A ( self: Optional[int] ) -> Dict:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(__A )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A , __A )
def __A ( self: Tuple ) -> Optional[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(__A , return_tensors='''np''' )
_A = processor_auto(__A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(__A )
_A = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: Optional[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __A ( self: Dict ) -> List[str]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: List[str] ) -> List[str]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: List[str] ) -> Optional[int]:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(__A )
_A = next(__A )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(__A ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=__A )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A )
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
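# Standalone sketch of the offset-to-seconds conversion exercised in the slow
# test above. The 320 samples-per-logit ratio is the usual wav2vec2-base
# setting and is assumed here rather than read from a model config:
def _offsets_to_seconds(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16000):
    time_offset = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]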
| 718 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, up to fourth order (Adams-Bashforth)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
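# A minimal smoke test of the scheduler loop above. Shapes and the step count
# are arbitrary, and random tensors stand in for genuine model outputs:
def _demo_scheduler_loop():
    scheduler = IPNDMScheduler()
    scheduler.set_timesteps(4)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample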
| 62 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
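# For reference, the layouts built by the methods above (token ids illustrative):
#   single sequence:   <s> A </s>          with token_type_ids 0 0 ... 0
#   pair of sequences: <s> A </s> B </s>   with token_type_ids 0 ... 0 followed by 1 ... 1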
| 23 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 23 | 1 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
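# Quick shape check for the helper above (values arbitrary): four timesteps and
# an embedding width of 8 should yield a (4, 8) array.
def _demo_sinusoidal_shapes():
    emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
    assert emb.shape == (4, 8)
    return emb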
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        ) | 703 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps | 664 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 532 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVeipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 532 | 1 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 702 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
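
    # Illustrative usage (an addition, with arbitrary vertex count and edges):
    # because edge weights are restricted to 0 and 1, the deque-based search
    # above finds shortest paths without a priority queue.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    print(g.get_shortest_path(0, 2))  # prints 1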
| 72 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 198 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # Field names are taken from the `datasets` library's DownloadConfig and
    # are assumed here; only the default values were preserved in the source.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
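

if __name__ == "__main__":
    # Minimal sketch (an addition, not part of the library): `copy` deep-copies
    # every field, so mutating the clone leaves the original config untouched.
    config = DownloadConfig(cache_dir="/tmp/datasets_cache", max_retries=3)
    clone = config.copy()
    clone.max_retries = 5
    assert config.max_retries == 3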
| 198 | 1 |
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological ordering, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
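
# Illustrative extra check (an addition): the back edge 2 -> 0 below creates a
# cycle, so the function prints "Cycle exists" instead of an ordering.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)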
| 129 |
import warnings
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 129 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_textdatasetdict_reader_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_textdatasetdict_reader_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_textdatasetdict_reader_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
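

# How to run just this module (illustrative command; the path assumes the
# upstream repository layout):
#   pytest tests/io/test_text.py -q
# The `text_path` and `tmp_path` arguments above are pytest fixtures, so no
# manual setup is needed.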
| 65 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 548 | 0 |
"""Solve the n-queens problem with depth-first search and diagonal checks."""

from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) no other queen occupies the same column, because if one
        # does it means there is a collision in the vertical direction. Then we
        # apply the two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
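
    # Extra sanity sketch (an addition): the solver should reproduce the known
    # n-queens counts, e.g. exactly 4 distinct boards for a 6 x 6 board.
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, 6)
    assert len(boards) == 4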
| 714 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from a (channels, height, width) tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
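

# Shape walkthrough for the helper above (illustrative numbers): with a
# 3 x 64 x 48 input and 16 x 16 patches, `unfold` produces (1, 3*16*16, 12)
# columns, which are reshaped and permuted into a patch grid of shape
# (1, 64 // 16, 48 // 16, 3 * 16 * 16) = (1, 4, 3, 768).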
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    """Constructs a Pix2Struct image processor."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
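

# Illustrative usage sketch (an addition): with the default 16 x 16 patches,
# `Pix2StructImageProcessor()(images=pil_image, return_tensors="np")` returns a
# BatchFeature whose "flattened_patches" entry has shape
# (1, max_patches, 2 + 16 * 16 * 3) and whose "attention_mask" marks the
# non-padded patch rows.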
| 324 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = GPTSwaTokenizer
A : Optional[Any] = False
A : Tuple = True
A : int = False
def _lowerCAmelCase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : Union[str, Any] = GPTSwaTokenizer(_SCREAMING_SNAKE_CASE , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : List[str] = "This is a test"
snake_case_ : Optional[Any] = "This is a test"
return input_text, output_text
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Any = "<s>"
snake_case_ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2000 )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Optional[Any] = GPTSwaTokenizer(_SCREAMING_SNAKE_CASE )
snake_case_ : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [465, 287, 265, 631, 842] )
snake_case_ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
snake_case_ : Tuple = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[str] = GPTSwaTokenizer(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = ["This is a test", "I was born in 92000, and this is falsé."]
snake_case_ : str = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertListEqual(tokenizer.encode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Test that decode_fast returns the input text
for text, token_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.decode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : Any = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
snake_case_ : Dict = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="AI-Sweden/gpt-sw3-126m" , sequences=_SCREAMING_SNAKE_CASE , )
| 568 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
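

# Example (illustrative): get_pairs(("l", "o", "w</w>")) returns
# {("l", "o"), ("o", "w</w>")}, i.e. every pair of adjacent symbols.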
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
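
    # Illustrative trace (hypothetical merge table): if bpe_ranks ranks
    # ("l", "o") before ("lo", "w</w>"), then bpe("low") rewrites
    # ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",) and returns "low".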
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 568 | 1 |
"""Fetch quotes from the ZenQuotes public API."""

import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
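
    # Note (suggested hardening, not in the original): network calls are more
    # robust with an explicit timeout, e.g.
    #   requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()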
| 172 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase__ = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (adapted from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file of `<symbol> <count>` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Add words from a text file following the `load` format."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
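

# Example (illustrative): {"wor@@": 5, "world": 3} becomes
# {"wor": 5, "world</w>": 3}; the loop above then restores special tokens such
# as "<s>" to their original keys.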
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
if not os.path.exists(UpperCAmelCase ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
snake_case__ : Tuple = os.path.join(UpperCAmelCase , """checkpoint.pt""" )
if not os.path.isfile(UpperCAmelCase ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
snake_case__ : str = torch.load(UpperCAmelCase , map_location="""cpu""" )
snake_case__ : List[Any] = chkpt["""cfg"""]["""model"""]
# dicts
snake_case__ : Optional[Any] = os.path.join(UpperCAmelCase , """dict.txt""" )
if not os.path.isfile(UpperCAmelCase ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
snake_case__ : List[str] = Dictionary.load(UpperCAmelCase )
snake_case__ : Optional[int] = rewrite_dict_keys(src_dict.indices )
snake_case__ : Tuple = len(UpperCAmelCase )
snake_case__ : Optional[Any] = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# merges_file (bpecodes)
snake_case__ : Union[str, Any] = os.path.join(UpperCAmelCase , """bpecodes""" )
if not os.path.isfile(UpperCAmelCase ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
snake_case__ : Tuple = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(UpperCAmelCase , UpperCAmelCase )
# model config
snake_case__ : str = os.path.join(UpperCAmelCase , """config.json""" )
snake_case__ : Dict = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.0_2,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-1_2,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# tokenizer config
snake_case__ : int = os.path.join(UpperCAmelCase , UpperCAmelCase )
snake_case__ : List[str] = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# model
snake_case__ : int = chkpt["""model"""]
# remove unneeded keys
snake_case__ : List[Any] = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCAmelCase , UpperCAmelCase )
snake_case__ : List[Any] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
snake_case__ : str = model_state_dict.pop(UpperCAmelCase )
else:
snake_case__ : Optional[int] = model_state_dict.pop(UpperCAmelCase )
snake_case__ : Tuple = BioGptConfig.from_pretrained(UpperCAmelCase )
snake_case__ : Optional[int] = BioGptForCausalLM(UpperCAmelCase )
# check that it loads ok
model_new.load_state_dict(UpperCAmelCase )
# save
snake_case__ : Dict = os.path.join(UpperCAmelCase , UpperCAmelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(UpperCAmelCase , UpperCAmelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 172 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = BloomTokenizerFast
UpperCAmelCase__ = BloomTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = """tokenizer_file"""
UpperCAmelCase__ = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def A_ ( self : str ) -> Any:
super().setUp()
lowerCamelCase__ : Optional[int] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Any , **UpperCAmelCase : Any ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> Union[str, Any]:
lowerCamelCase__ : str = self.get_rust_tokenizer()
lowerCamelCase__ : Dict = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
lowerCamelCase__ : Optional[int] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowerCamelCase__ : Any = tokenizer.batch_encode_plus(UpperCAmelCase )['input_ids']
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : int , UpperCAmelCase : Optional[Any]=6 ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase__ : Any = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCamelCase__ : List[Any] = 'This is a simple input'
lowerCamelCase__ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : Union[str, Any] = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.encode_plus(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.batch_encode_plus(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.encode(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.batch_encode_plus(UpperCAmelCase , max_length=UpperCAmelCase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
lowerCamelCase__ : Union[str, Any] = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' , )
def A_ ( self : Any ) -> Any:
lowerCamelCase__ : Union[str, Any] = self.get_rust_tokenizer()
lowerCamelCase__ : str = load_dataset('xnli' , 'all_languages' , split='test' , streaming=UpperCAmelCase )
lowerCamelCase__ : int = next(iter(UpperCAmelCase ) )['premise'] # pick up one data
lowerCamelCase__ : Tuple = list(sample_data.values() )
lowerCamelCase__ : Dict = list(map(tokenizer.encode , UpperCAmelCase ) )
lowerCamelCase__ : int = [tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Dict ) -> str:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 295 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = ["""pixel_values"""]
def __init__( self : Any , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = 0.9 , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : str , ) -> None:
super().__init__(**UpperCAmelCase )
lowerCamelCase__ : int = size if size is not None else {'shortest_edge': 224}
lowerCamelCase__ : Tuple = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase__ : str = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Tuple = do_resize
lowerCamelCase__ : str = size
lowerCamelCase__ : List[str] = crop_pct
lowerCamelCase__ : Any = resample
lowerCamelCase__ : Tuple = do_center_crop
lowerCamelCase__ : Any = crop_size
lowerCamelCase__ : Optional[int] = do_rescale
lowerCamelCase__ : Optional[Any] = rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase__ : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray:
lowerCamelCase__ : Any = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCamelCase__ : List[Any] = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCamelCase__ : int = int(size['height'] / crop_pct )
else:
lowerCamelCase__ : Any = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase , size=UpperCAmelCase , default_to_square=UpperCAmelCase )
else:
if "shortest_edge" in size:
lowerCamelCase__ : int = get_resize_output_image_size(UpperCAmelCase , size=size['shortest_edge'] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowerCamelCase__ : List[Any] = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(UpperCAmelCase ) )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : str , ) -> np.ndarray:
lowerCamelCase__ : Union[str, Any] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCAmelCase , size=(size['height'], size['width']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ) -> int:
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ) -> np.ndarray:
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : str , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Any , ) -> PIL.Image.Image:
lowerCamelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : Any = resample if resample is not None else self.resample
lowerCamelCase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : str = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Optional[Any] = size if size is not None else self.size
lowerCamelCase__ : Optional[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : int = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Any = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):  # parenthesized: both must be set when resizing
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Dict = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ : Any = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , crop_pct=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase__ : Optional[Any] = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : int = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ : List[Any] = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowerCamelCase__ : Dict = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCamelCase__ : Dict = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
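# Editor's sketch (not part of the original class): the crop_pct branch in resize above
# enlarges the resize target so that a later center crop of `size` keeps only the central
# `crop_pct` fraction of the image. The arithmetic in isolation, for a square target;
# the helper name `_scaled_resize_edge` is the editor's, purely illustrative:
def _scaled_resize_edge(target_edge: int, crop_pct: float) -> int:
    return int(target_edge / crop_pct)

# e.g. a 224-pixel final crop at crop_pct = 0.875 resizes the shortest edge to 256 first
assert _scaled_resize_edge(224, 0.875) == 256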
| 295 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__a : Optional[datasets.Features] =None
__a : str ="utf-8"
__a : Optional[str] =None
__a : Optional[str] =None
__a : bool =True # deprecated
__a : Optional[int] =None # deprecated
__a : int =1_0 << 2_0 # 10MB
__a : Optional[bool] =None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__a : str =JsonConfig
def __snake_case ( self ):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowerCAmelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self , UpperCAmelCase_ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
lowerCAmelCase = data_files
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def __snake_case ( self , UpperCAmelCase_ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
return pa_table
def __snake_case ( self , UpperCAmelCase_ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
# We keep only the field we are interested in
lowerCAmelCase = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase_ , (list, tuple) ):
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
else:
lowerCAmelCase = dataset
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
yield file_idx, self._cast_table(UpperCAmelCase_ )
# If the file has one json object per line
else:
with open(UpperCAmelCase_ , '''rb''' ) as f:
lowerCAmelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
lowerCAmelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowerCAmelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
try:
while True:
try:
lowerCAmelCase = paj.read_json(
io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase_ )
or block_size > len(UpperCAmelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON
try:
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(UpperCAmelCase_ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
batch_idx += 1
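# Editor's sketch (illustrative, not part of the original builder): the inner retry loop
# above doubles `block_size` whenever pyarrow cannot parse a chunk, typically because a
# JSON record straddles two block boundaries. A simplified, standalone version of that
# pattern (the builder additionally inspects the error message before retrying); the
# helper name `read_json_lines` is the editor's:
import io
import pyarrow as pa
import pyarrow.json as paj

def read_json_lines(raw: bytes, block_size: int = 1024) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size))
        except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
            if block_size > len(raw):
                raise  # growing the block further cannot help
            block_size *= 2  # a record probably straddled a block boundary; retry larger

assert read_json_lines(b'{"a": 1}\n{"a": 2}\n').num_rows == 2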
| 33 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ):
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = load_from_cache_file
lowerCAmelCase = file_format
lowerCAmelCase = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def __snake_case ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
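# Editor's note: in the non-streaming path above, download_mode=DownloadMode.FORCE_REDOWNLOAD
# is passed whenever load_from_cache_file is falsy, forcing the Spark builder to rebuild
# rather than reuse an already prepared cache; passing download_mode=None keeps the
# default cache-reuse behaviour.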
| 33 | 1 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _snake_case ( A_ ):
'''simple docstring'''
__snake_case = "char"
__snake_case = "bpe"
__snake_case = "wp"
_SCREAMING_SNAKE_CASE : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _snake_case ( A_ ):
'''simple docstring'''
__snake_case = ["image_processor", "char_tokenizer"]
__snake_case = "ViTImageProcessor"
__snake_case = "MgpstrTokenizer"
def __init__( self: str , __UpperCamelCase: Dict=None , __UpperCamelCase: int=None , **__UpperCamelCase: Any ) -> Optional[Any]:
__magic_name__ : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case__ , )
__magic_name__ : Optional[Any] = kwargs.pop("feature_extractor" )
__magic_name__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
__magic_name__ : List[str] = tokenizer
__magic_name__ : Any = AutoTokenizer.from_pretrained("gpt2" )
__magic_name__ : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(snake_case__ , snake_case__ )
def __call__( self: Dict , __UpperCamelCase: int=None , __UpperCamelCase: Dict=None , __UpperCamelCase: Optional[int]=None , **__UpperCamelCase: int ) -> Optional[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
__magic_name__ : List[Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None:
__magic_name__ : Optional[int] = self.char_tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__magic_name__ : Dict = encodings["input_ids"]
return inputs
def lowerCAmelCase__ ( self: Optional[Any] , __UpperCamelCase: Optional[int] ) -> Optional[int]:
__magic_name__ : Dict = sequences
__magic_name__ : List[Any] = char_preds.size(0 )
__magic_name__ : List[Any] = self._decode_helper(snake_case__ , "char" )
__magic_name__ : Optional[int] = self._decode_helper(snake_case__ , "bpe" )
__magic_name__ : Any = self._decode_helper(snake_case__ , "wp" )
__magic_name__ : List[str] = []
__magic_name__ : List[str] = []
for i in range(snake_case__ ):
__magic_name__ : Optional[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
__magic_name__ : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
__magic_name__ : Dict = scores.index(max(snake_case__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[int] = final_strs
__magic_name__ : str = final_scores
__magic_name__ : Optional[Any] = char_strs
__magic_name__ : List[str] = bpe_strs
__magic_name__ : int = wp_strs
return out
def lowerCAmelCase__ ( self: str , __UpperCamelCase: List[Any] , __UpperCamelCase: Optional[int] ) -> Tuple:
if format == DecodeType.CHARACTER:
__magic_name__ : List[Any] = self.char_decode
__magic_name__ : List[str] = 1
__magic_name__ : int = "[s]"
elif format == DecodeType.BPE:
__magic_name__ : List[str] = self.bpe_decode
__magic_name__ : Dict = 2
__magic_name__ : Union[str, Any] = "#"
elif format == DecodeType.WORDPIECE:
__magic_name__ : Optional[Any] = self.wp_decode
__magic_name__ : str = 102
__magic_name__ : Any = "[SEP]"
else:
raise ValueError(f"""Format {format} is not supported.""" )
__magic_name__ : Optional[Any] = [], []
__magic_name__ : Tuple = pred_logits.size(0 )
__magic_name__ : List[Any] = pred_logits.size(1 )
__magic_name__ : int = pred_logits.topk(1 , dim=-1 , largest=snake_case__ , sorted=snake_case__ )
__magic_name__ : str = preds_index.view(-1 , snake_case__ )[:, 1:]
__magic_name__ : Tuple = decoder(snake_case__ )
__magic_name__ : str = torch.nn.functional.softmax(snake_case__ , dim=2 ).max(dim=2 )
__magic_name__ : Dict = preds_max_prob[:, 1:]
for index in range(snake_case__ ):
__magic_name__ : Optional[Any] = preds_str[index].find(snake_case__ )
__magic_name__ : List[str] = preds_str[index][:pred_eos]
__magic_name__ : List[str] = preds_index[index].cpu().tolist()
__magic_name__ : Optional[Any] = pred_index.index(snake_case__ ) if eos_token in pred_index else -1
__magic_name__ : Tuple = preds_max_prob[index][: pred_eos_index + 1]
__magic_name__ : Optional[Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(snake_case__ )
conf_scores.append(snake_case__ )
return dec_strs, conf_scores
def lowerCAmelCase__ ( self: Union[str, Any] , __UpperCamelCase: Optional[int] ) -> Tuple:
__magic_name__ : str = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(snake_case__ )]
return decode_strs
def lowerCAmelCase__ ( self: Optional[Any] , __UpperCamelCase: int ) -> List[Any]:
return self.bpe_tokenizer.batch_decode(snake_case__ )
def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: List[str] ) -> Any:
__magic_name__ : Optional[int] = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(snake_case__ )]
        return decode_strs
| 436 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each code point i of `text` as (i + k) * k with a fresh random key k."""
        plain = [ord(char) for char in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: (cipher[i] - key[i] ** 2) / key[i] recovers code point i."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - key[i] ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
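# Editor's note: decryption is exact integer arithmetic because encrypt stores
# (i + k) * k = i*k + k*k for each code point i, so (cipher - k*k) / k == i.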
| 204 | 0 |
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from the standard UK coin denominations (bottom-up DP)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
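# Editor's check (not in the original file): for 5 pence the four ways are 5, 2+2+1,
# 2+1+1+1 and 1+1+1+1+1.
assert solution(5) == 4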
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2 | 715 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return every contiguous character n-gram of length `ngram_size` in `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
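# Editor's check (not in the original file): character trigrams of "hello".
assert create_ngram("hello", 3) == ["hel", "ell", "llo"]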
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 535 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowercase__ :
def __A ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
return None
class lowercase__ :
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
return None
class lowercase__ ( unittest.TestCase):
UpperCamelCase_ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __A ( self : str ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase__ , '''tf''' , 12 , **UpperCamelCase__ )
@require_torch
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase__ , '''pt''' , 12 , **UpperCamelCase__ )
@require_torch
@slow
def __A ( self : int ):
'''simple docstring'''
from transformers import BertModel
SCREAMING_SNAKE_CASE : int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(UpperCamelCase__ ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE : Any = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE : List[str] = BertModel(BertConfig(vocab_size=len(UpperCamelCase__ ) ) )
model.save_pretrained(UpperCamelCase__ )
self._test_export(UpperCamelCase__ , '''pt''' , 12 , UpperCamelCase__ )
@require_tf
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE : Dict = self._test_export(UpperCamelCase__ , '''tf''' , 12 , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = quantize(Path(UpperCamelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def __A ( self : int ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE : Optional[int] = self._test_export(UpperCamelCase__ , '''pt''' , 12 , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = quantize(UpperCamelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Dict ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE : Any = Path(UpperCamelCase__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
return path
except Exception as e:
self.fail(UpperCamelCase__ )
@require_torch
@require_tokenizers
@slow
def __A ( self : Tuple ):
'''simple docstring'''
from transformers import BertModel
SCREAMING_SNAKE_CASE : List[str] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
SCREAMING_SNAKE_CASE : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(UpperCamelCase__ , UpperCamelCase__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
from transformers import TFBertModel
SCREAMING_SNAKE_CASE : Dict = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(UpperCamelCase__ , UpperCamelCase__ , '''tf''' )
def __A ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = FeatureExtractionPipeline(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = infer_shapes(UpperCamelCase__ , UpperCamelCase__ )
# Assert all variables are present
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCamelCase__ )
self.assertSequenceEqual(variable_names[3:] , UpperCamelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = ensure_valid_input(FuncContiguousArgs() , UpperCamelCase__ , UpperCamelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCamelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCamelCase__ ) , set(UpperCamelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCamelCase__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = ensure_valid_input(FuncNonContiguousArgs() , UpperCamelCase__ , UpperCamelCase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCamelCase__ ) , 1 )
self.assertEqual(len(UpperCamelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 248 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__UpperCamelCase : Optional[Any] = '<<<<<<< This should probably be modified because it mentions: '
__UpperCamelCase : Optional[Any] = '=======\n>>>>>>>\n'
__UpperCamelCase : Optional[int] = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
__UpperCamelCase : Union[str, Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def A ( _lowercase ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase__ ( UpperCamelCase_):
@staticmethod
def __A ( UpperCamelCase__ : ArgumentParser ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str , *UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = get_logger('''datasets-cli/converting''' )
SCREAMING_SNAKE_CASE : List[str] = tfds_path
SCREAMING_SNAKE_CASE : Optional[int] = datasets_directory
def __A ( self : Dict ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE : Dict = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
SCREAMING_SNAKE_CASE : Dict = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
SCREAMING_SNAKE_CASE : str = os.path.abspath(self._datasets_directory )
self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Dict = {}
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE : List[str] = os.listdir(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"""Looking at file {f_name}""" )
SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if not os.path.isfile(UpperCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = f.readlines()
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : str = []
for line in lines:
SCREAMING_SNAKE_CASE : List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
SCREAMING_SNAKE_CASE : List[str] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
continue
elif "from absl import logging" in out_line:
SCREAMING_SNAKE_CASE : Any = '''from datasets import logging\n'''
elif "getLogger" in out_line:
SCREAMING_SNAKE_CASE : Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Tuple = list(filter(lambda UpperCamelCase__ : e in out_line , UpperCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCamelCase__ ) + '''\n''' )
out_lines.append(UpperCamelCase__ )
out_lines.append(UpperCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
SCREAMING_SNAKE_CASE : Any = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
SCREAMING_SNAKE_CASE : Optional[int] = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , UpperCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
SCREAMING_SNAKE_CASE : List[Any] = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
SCREAMING_SNAKE_CASE : Optional[int] = True
out_lines.append(UpperCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
SCREAMING_SNAKE_CASE : Dict = f_name.replace('''.py''' , '''''' )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
self._logger.info(f"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(UpperCamelCase__ )
if needs_manual_update:
with_manual_update.append(UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(UpperCamelCase__ )
self._logger.info(f"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
SCREAMING_SNAKE_CASE : Tuple = os.path.basename(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(UpperCamelCase__ , UpperCamelCase__ )
except KeyError:
self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 248 | 1 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be greater than 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            remainder = odd_composites[num] - 2 * i * i
            if is_prime(remainder):
                break
            i += 1
        else:
            # No decomposition found: this odd composite is a counterexample
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
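# Editor's note (external fact, flagged): this is Project Euler problem 46; the smallest
# odd composite that is not a prime plus twice a square, and hence solution(), is 5777.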
| 588 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase_ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase_ = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCAmelCase_ ( ):
SCREAMING_SNAKE_CASE__ =(
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
SCREAMING_SNAKE_CASE__ =bs[:]
SCREAMING_SNAKE_CASE__ =0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE__ =[chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase, __UpperCamelCase ) )
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =set()
SCREAMING_SNAKE_CASE__ =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ =char
return pairs
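# Editor's note: with the collapsed assignment targets restored (pairs, prev_char),
# get_pairs returns the set of adjacent symbol pairs of a word tuple, e.g.
# get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}; the bpe method below repeatedly
# merges the pair with the lowest rank in self.bpe_ranks until no ranked pair remains.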
class __a ( __lowerCamelCase ):
"""simple docstring"""
_A : List[Any] = VOCAB_FILES_NAMES
_A : Dict = PRETRAINED_VOCAB_FILES_MAP
_A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple ,_UpperCamelCase : List[Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Optional[Any]="replace" ,_UpperCamelCase : Tuple="<s>" ,_UpperCamelCase : Tuple="</s>" ,_UpperCamelCase : str="</s>" ,_UpperCamelCase : str="<s>" ,_UpperCamelCase : int="<unk>" ,_UpperCamelCase : str="<pad>" ,_UpperCamelCase : Tuple="<mask>" ,_UpperCamelCase : Any=False ,**_UpperCamelCase : Any ,) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else bos_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else eos_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else sep_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else cls_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else unk_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token
super().__init__(
errors=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,**_UpperCamelCase ,)
with open(_UpperCamelCase ,encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE__ =json.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ ={v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ =errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ =bytes_to_unicode()
SCREAMING_SNAKE_CASE__ ={v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase ,encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE__ =merges_handle.read().split("""\n""" )[1:-1]
SCREAMING_SNAKE_CASE__ =[tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ =dict(zip(_UpperCamelCase ,range(len(_UpperCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ =re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self : Dict ) -> str:
'''simple docstring'''
return len(self.encoder )
def __A ( self : Tuple ) -> List[str]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __A ( self : Tuple ,_UpperCamelCase : Any ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ =tuple(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =get_pairs(_UpperCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ =min(_UpperCamelCase ,key=lambda _UpperCamelCase : self.bpe_ranks.get(_UpperCamelCase ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =bigram
SCREAMING_SNAKE_CASE__ =[]
SCREAMING_SNAKE_CASE__ =0
while i < len(_UpperCamelCase ):
try:
SCREAMING_SNAKE_CASE__ =word.index(_UpperCamelCase ,_UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ =j
if word[i] == first and i < len(_UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ =tuple(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =new_word
if len(_UpperCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ =get_pairs(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =""" """.join(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =word
return word
def __A ( self : int ,_UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for token in re.findall(self.pat ,_UpperCamelCase ):
SCREAMING_SNAKE_CASE__ ="""""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase ).split(""" """ ) )
return bpe_tokens
def __A ( self : List[str] ,_UpperCamelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(_UpperCamelCase ,self.encoder.get(self.unk_token ) )
def __A ( self : int ,_UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
return self.decoder.get(_UpperCamelCase )
def __A ( self : Dict ,_UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""""".join(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def __A ( self : Optional[Any] ,_UpperCamelCase : str ,_UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ =os.path.join(
_UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ =os.path.join(
_UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_UpperCamelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_UpperCamelCase ,ensure_ascii=_UpperCamelCase ) + """\n""" )
SCREAMING_SNAKE_CASE__ =0
with open(_UpperCamelCase ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
SCREAMING_SNAKE_CASE__ =token_index
writer.write(""" """.join(_UpperCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def __A ( self : str ,_UpperCamelCase : List[int] ,_UpperCamelCase : Optional[List[int]] = None ,_UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def __A ( self : Any ,_UpperCamelCase : List[int] ,_UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : Dict ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Tuple=False ,**_UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ =""" """ + text
return (text, kwargs)
def __A ( self : List[Any] ,_UpperCamelCase : List[int] ,_UpperCamelCase : Optional[List[int]] = None ) -> Optional[Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __A ( self : int ,_UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =""" """.join(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =self.encode(_UpperCamelCase )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ =input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 588 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = BarthezTokenizer
A__ : Optional[Any] = BarthezTokenizerFast
A__ : Any = True
A__ : List[Any] = True
def snake_case_ ( self : Optional[Any] ):
super().setUp()
__lowercase : Dict = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_snake_case )
__lowercase : Tuple = tokenizer
def snake_case_ ( self : Dict ):
__lowercase : Optional[Any] = '''<pad>'''
__lowercase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def snake_case_ ( self : List[str] ):
__lowercase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_snake_case ) , 10_1122 )
def snake_case_ ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__lowercase : str = [0, 57, 3018, 7_0307, 91, 2]
__lowercase : Union[str, Any] = self.tokenizer(
_snake_case , max_length=len(_snake_case ) , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowercase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self : Dict ):
if not self.test_rust_tokenizer:
return
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Union[str, Any] = self.get_rust_tokenizer()
__lowercase : List[Any] = '''I was born in 92000, and this is falsé.'''
__lowercase : List[Any] = tokenizer.tokenize(_snake_case )
__lowercase : Any = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowercase : str = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
__lowercase : Optional[Any] = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Optional[Any] = self.get_rust_tokenizer()
__lowercase : Dict = tokenizer.encode(_snake_case )
__lowercase : Dict = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
@slow
def snake_case_ ( self : int ):
# fmt: off
__lowercase : List[str] = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowercase : Dict = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_snake_case , )
| 509 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Tuple , _snake_case : int = 101 ):
__lowercase : Tuple = length
def __len__( self : str ):
return self.length
def __getitem__( self : List[Any] , _snake_case : List[str] ):
return i
class __lowerCAmelCase :
"""simple docstring"""
def __call__( self : Union[str, Any] , _snake_case : List[str] ):
return {"input_ids": torch.tensor(_snake_case ), "labels": torch.tensor(_snake_case )}
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__lowercase : str = nn.Linear(120 , 80 )
def snake_case_ ( self : Tuple , _snake_case : List[str] , _snake_case : Optional[int]=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def snake_case_ ( self : Tuple ):
__lowercase : List[str] = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__lowercase : Any = self.get_auto_remove_tmp_dir()
__lowercase : List[str] = F'--output_dir {output_dir}'.split()
__lowercase : Optional[Any] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(_snake_case , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def snake_case_ ( self : str ):
__lowercase : Tuple = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__lowercase : Dict = self.get_auto_remove_tmp_dir()
__lowercase : str = F'--output_dir {output_dir}'.split()
__lowercase : Dict = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(_snake_case , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__lowerCAmelCase : int = HfArgumentParser((TrainingArguments,))
__lowerCAmelCase : int = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__lowerCAmelCase : str = DummyDataset(dataset_length)
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Dict:
__lowercase : Dict = list(range(len(__lowerCAmelCase ) ) )
__lowercase : Optional[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
__lowerCAmelCase : Tuple = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__lowerCAmelCase : Optional[int] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCAmelCase : Optional[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCAmelCase : Any = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCAmelCase : List[str] = None
| 509 | 1 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase__ ( _a , unittest.TestCase ):
'''simple docstring'''
a : Any = BertJapaneseTokenizer
a : List[Any] = False
a : List[Any] = True
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_ ( self : int , A_ : Optional[int] ):
'''simple docstring'''
__lowercase = """こんにちは、世界。 \nこんばんは、世界。"""
__lowercase = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Tuple ):
'''simple docstring'''
__lowercase , __lowercase = self.get_input_output_texts(A_ )
__lowercase = tokenizer.encode(A_ , add_special_tokens=A_ )
__lowercase = tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ )
return text, ids
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(A_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(A_ )
__lowercase = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowercase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(A_ , """wb""" ) as handle:
pickle.dump(A_ , A_ )
with open(A_ , """rb""" ) as handle:
__lowercase = pickle.load(A_ )
__lowercase = tokenizer_new.tokenize(A_ )
self.assertListEqual(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
try:
__lowercase = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
try:
__lowercase = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = MecabTokenizer(do_lower_case=A_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
try:
__lowercase = MecabTokenizer(
do_lower_case=A_ , normalize_text=A_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = MecabTokenizer(normalize_text=A_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(A_ )
__lowercase = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowercase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(A_ , """wb""" ) as handle:
pickle.dump(A_ , A_ )
with open(A_ , """rb""" ) as handle:
__lowercase = pickle.load(A_ )
__lowercase = tokenizer_new.tokenize(A_ )
self.assertListEqual(A_ , A_ )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = SudachiTokenizer(do_lower_case=A_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = SudachiTokenizer(normalize_text=A_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = SudachiTokenizer(trim_whitespace=A_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(A_ )
__lowercase = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowercase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(A_ , """wb""" ) as handle:
pickle.dump(A_ , A_ )
with open(A_ , """rb""" ) as handle:
__lowercase = pickle.load(A_ )
__lowercase = tokenizer_new.tokenize(A_ )
self.assertListEqual(A_ , A_ )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = JumanppTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = JumanppTokenizer(normalize_text=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = JumanppTokenizer(trim_whitespace=A_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
__lowercase = {}
for i, token in enumerate(A_ ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=A_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
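        # Added note: WordPiece matches the longest vocab prefix greedily, so
        # "こんばんは" splits into "こん" + "##ばんは", while "こんばんにちは"
        # cannot be fully covered by this toy vocab and falls back to [UNK].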
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
__lowercase = tokenizer.subword_tokenizer
__lowercase = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(A_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
__lowercase = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(A_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
__lowercase = tokenizer.encode("""ありがとう。""" , add_special_tokens=A_ )
__lowercase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=A_ )
__lowercase = tokenizer.build_inputs_with_special_tokens(A_ )
__lowercase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__ ( _a , unittest.TestCase ):
'''simple docstring'''
a : Optional[int] = BertJapaneseTokenizer
a : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
super().setUp()
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **A_ : List[str] ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : Dict ):
'''simple docstring'''
__lowercase = """こんにちは、世界。 \nこんばんは、世界。"""
__lowercase = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
__lowercase = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
A_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowercase = {}
for i, token in enumerate(A_ ):
__lowercase = i
__lowercase = CharacterTokenizer(vocab=A_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
__lowercase = tokenizer.encode("""ありがとう。""" , add_special_tokens=A_ )
__lowercase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=A_ )
__lowercase = tokenizer.build_inputs_with_special_tokens(A_ )
__lowercase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = """cl-tohoku/bert-base-japanese"""
__lowercase = AutoTokenizer.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(A_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
__lowercase = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(A_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 712 |
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """simple docstring"""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    """simple docstring"""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
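# Added sanity check (a minimal sketch, not part of the original file): the
# sort is in-place and should agree with Python's built-in sorted().
if __name__ == "__main__":
    import random

    sample = [random.randint(-50, 50) for _ in range(20)]
    expected = sorted(sample)
    pigeonhole_sort(sample)
    assert sample == expected, "pigeonhole_sort disagrees with sorted()"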
| 442 | 0 |
def miller_rabin(n, allow_probable=False):
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
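# Worked example (added): for n = 221 = 13 * 17, n - 1 = 220 = 55 * 2**2, so
# d = 55 and s = 2. With base 2, pow(2, 55, 221) = 128 and pow(2, 110, 221) = 30,
# neither 1 nor 220, so 221 is correctly reported composite.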
def test_miller_rabin():
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 590 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only=True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # show the bar only on the local main process; disable it everywhere else
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
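# Hypothetical usage sketch (added; the import path is an assumption, not part
# of this module):
#
#     from accelerate.utils import tqdm
#
#     for batch in tqdm(dataloader, desc="training"):
#         ...  # only the local main process renders the bar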
| 590 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = []
for part_id in partition_order:
_lowerCAmelCase : List[Any] = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase : List[Any] = spark.range(100 ).repartition(1 )
_lowerCAmelCase : Optional[Any] = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
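    # Added arithmetic: 100 int64 rows take 800 bytes in total, each 16-byte
    # shard holds 16 // 8 = 2 rows, hence 800 / 16 = 50 partitions.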
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase : Dict = spark.range(10 ).repartition(2 )
_lowerCAmelCase : Union[str, Any] = [1, 0]
_lowerCAmelCase : Any = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
_lowerCAmelCase : Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_lowerCAmelCase : Dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase : List[Any] = spark.range(10 ).repartition(1 )
_lowerCAmelCase : Dict = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase : Tuple = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
_lowerCAmelCase : Dict = lambda _lowerCamelCase : x.reverse()
_lowerCAmelCase : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
_lowerCAmelCase : List[str] = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
_lowerCAmelCase : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase : int = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_lowerCAmelCase : Optional[int] = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCAmelCase : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_lowerCAmelCase : str = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCAmelCase : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase : Optional[int] = spark.range(100 ).repartition(1 )
_lowerCAmelCase : Optional[Any] = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        '''simple docstring'''
        return self.__str__()

    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_2)
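# Minimal usage sketch (added; not in the original file): p(x) = 3x^2 + 2x + 1
# with coefficients ordered from degree 0 upwards.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    assert p.evaluate(2) == 17  # 1 + 2 * 2 + 3 * 4
    assert str(p.derivative()) == "6x + 2"
    assert str(p.integral()) == "1.0x^3 + 1.0x^2 + 1.0x"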
| 658 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
SCREAMING_SNAKE_CASE: List[Any] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
SCREAMING_SNAKE_CASE: Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
SCREAMING_SNAKE_CASE: Dict = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _a ( lowerCAmelCase , lowerCAmelCase )-> Optional[Any]:
return float((preds == labels).mean() )
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="binary" )-> str:
SCREAMING_SNAKE_CASE_ = simple_accuracy(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = float(fa_score(y_true=lowerCAmelCase , y_pred=lowerCAmelCase , average=lowerCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _a ( lowerCAmelCase , lowerCAmelCase )-> Dict:
SCREAMING_SNAKE_CASE_ = {}
for id_pred, label in zip(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
SCREAMING_SNAKE_CASE_ = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
SCREAMING_SNAKE_CASE_ = [(pred, label)]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], []
for question, preds_labels in question_map.items():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = zip(*lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = fa_score(y_true=lowerCAmelCase , y_pred=lowerCAmelCase , average='macro' )
fas.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCAmelCase ) )
ems.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = float(sum(lowerCAmelCase ) / len(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = sum(lowerCAmelCase ) / len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = float(fa_score(y_true=lowerCAmelCase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __a ( self : Union[str, Any] ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __a ( self : List[Any] ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __a ( self : str , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(snake_case__ , snake_case__ )}
elif self.config_name == "cb":
return acc_and_fa(snake_case__ , snake_case__ , fa_avg='macro' )
elif self.config_name == "record":
SCREAMING_SNAKE_CASE_ = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
SCREAMING_SNAKE_CASE_ = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(snake_case__ , snake_case__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(snake_case__ , snake_case__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) | 360 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase_ :
@staticmethod
def __a ( *snake_case__ : List[Any] , **snake_case__ : List[Any] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class lowercase_ (unittest.TestCase ):
lowerCAmelCase__ =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = object_detector(examples[0] , threshold=0.0 )
SCREAMING_SNAKE_CASE_ = len(snake_case__ )
self.assertGreater(snake_case__ , 0 )
self.assertEqual(
snake_case__ , [
{
'score': ANY(snake_case__ ),
'label': ANY(snake_case__ ),
'box': {'xmin': ANY(snake_case__ ), 'ymin': ANY(snake_case__ ), 'xmax': ANY(snake_case__ ), 'ymax': ANY(snake_case__ )},
}
for i in range(snake_case__ )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@require_torch
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
SCREAMING_SNAKE_CASE_ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
SCREAMING_SNAKE_CASE_ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@require_torch
@slow
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0.2
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=snake_case__ , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
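        # Added note: raising `threshold` to 0.2 only filters the candidate
        # detections by score, so this result is a prefix of the unthresholded
        # list asserted above.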
@require_torch
@slow
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=snake_case__ , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
        ] , )
| 360 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a : Union[str, Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a : List[str] = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
a : Optional[Any] = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
a : int = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = RoFormerTokenizer
def __init__( self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ) -> Tuple:
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , A ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , A ) != strip_accents
):
UpperCAmelCase : Any = getattr(A , pre_tok_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[Any] = strip_accents
UpperCAmelCase : Optional[int] = pre_tok_class(**A )
UpperCAmelCase : str = do_lower_case
def __getstate__( self ) -> Any:
UpperCAmelCase : List[Any] = self.__dict__.copy()
UpperCAmelCase : Tuple = BertPreTokenizer()
return state
def __setstate__( self , A ) -> List[Any]:
UpperCAmelCase : int = d
UpperCAmelCase : List[str] = self.__dict__["""_tokenizer"""].get_vocab()
UpperCAmelCase : Optional[int] = PreTokenizer.custom(JiebaPreTokenizer(A ) )
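    # Added note: the custom Jieba pre-tokenizer cannot be pickled, so
    # __getstate__ swaps in a plain BertPreTokenizer before serialisation and
    # __setstate__ rebuilds the JiebaPreTokenizer from the restored vocab.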
def _lowercase( self , A , A=None ) -> Tuple:
UpperCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase( self , A , A = None ) -> Tuple[str]:
UpperCAmelCase : Dict = self._tokenizer.model.save(A , name=A )
return tuple(A )
def _lowercase( self , A , A=None , A=None , A=False , **A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = BertPreTokenizer()
return super().save_pretrained(A , A , A , A , **A )
| 672 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
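        # Added note (approximate): ids 0..3 are <pad>, </s>, <mask_1>, <mask_2>,
        # and together with the <unk_N> placeholders they fill the first
        # offset = 103 ids, so underlying SentencePiece ids are shifted by 103
        # (which is why <unk> surfaces as 103 + 2 = 105).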
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 672 | 1 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    '''simple docstring'''
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
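# Added note: the three-argument pow reduces modulo 10**n at every squaring, so
# the over-two-million-digit value of 2**7_830_457 is never materialised; a
# naive (28_433 * 2**7_830_457 + 1) % 10**n would build the full power first.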
| 633 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
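    # Added sanity check (illustrative, not in the original file): [1, 2, 3] has
    # mean 2, so the mean absolute deviation is (1 + 0 + 1) / 3 = 2/3.
    assert abs(_SCREAMING_SNAKE_CASE([1, 2, 3] ) - 2 / 3 ) < 1e-9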
| 633 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase_ ( __snake_case , __snake_case ):
_lowerCamelCase = 'swin'
_lowerCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class lowercase_ ( __snake_case ):
_lowerCamelCase = version.parse('1.11' )
@property
    def inputs( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation( self ):
        return 1e-4
| 580 |
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key(message: str , key: str ) -> str:
    '''Repeat ``key`` until it is as long as ``message``.'''
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str , key_new: str ) -> str:
    '''Encrypt ``message`` with the length-matched key ``key_new``.'''
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text
def original_text(cipher_text: str , key_new: str ) -> str:
    '''Decrypt ``cipher_text`` with the length-matched key ``key_new``.'''
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
def main() -> None:
    '''Encrypt a sample message and show that decryption recovers it.'''
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(F"""Encrypted Text = {s}""" )
    print(F"""Original Text = {original_text(s , key_new )}""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
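    # Illustrative note (added): for message "THE GERMAN ATTACK" and key "SECRET",
    # generate_key() pads the key to the 17-character "SECRETSECRETSECRE" before use.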
    main()
| 580 | 1 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_ ( vector : np.array ):
    """Apply the logistic sigmoid 1 / (1 + exp(-x)) element-wise to ``vector``."""
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
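    # Added sanity check (illustrative, not in the original file): sigmoid(0) == 0.5.
    assert lowerCAmelCase_(np.array([0.0] ) )[0] == 0.5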
| 616 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ =logging.get_logger(__name__)
class lowerCamelCase__ ( _a ):
a : Optional[int] = """vision-encoder-decoder"""
a : Dict = True
def __init__( self : Dict , **A_ : List[Any] ):
'''simple docstring'''
super().__init__(**A_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuraton of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        '''simple docstring'''
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
class lowerCamelCase__ ( _a ):
a : Dict = version.parse("""1.11""" )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1e-4
@property
    def outputs( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class lowerCamelCase__ ( _a ):
@property
    def inputs( self ):
'''simple docstring'''
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ):
        '''simple docstring'''
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
return common_inputs
class lowerCamelCase__ ( _a ):
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
pass
    def get_encoder_config( self , encoder_config : PretrainedConfig ):
        '''simple docstring'''
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , feature : str = "default" ):
        '''simple docstring'''
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
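# Usage sketch (added note; the concrete encoder/decoder configs are assumptions, not
# from this file): in upstream transformers this config class is
# VisionEncoderDecoderConfig, and from_encoder_decoder_configs(vit_config, gpt2_config)
# composes the two while flipping `is_decoder` and `add_cross_attention` to True on
# the decoder, as the classmethod above shows.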
| 616 | 1 |
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ):
    with open(__file__ ,'r' ) as fh:
        fcntl.flock(fh ,fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh ,fcntl.LOCK_UN )
lowerCAmelCase__ = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
lowerCAmelCase__ = torch.device("cuda", local_rank)
lowerCAmelCase__ = socket.gethostname()
lowerCAmelCase__ = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
lowerCAmelCase__ = dist.get_rank()
lowerCAmelCase__ = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
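# Typical launch (added note; torchrun exports LOCAL_RANK for every worker process):
#   torchrun --nproc_per_node=2 this_script.py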
| 705 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowercase ( ProcessorMixin ):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , feature_extractor , tokenizer):
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs)
        except OSError:
            warnings.warn(
                F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ' , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs)
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer)
    def __call__( self , *args , **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
else:
            audio = kwargs.pop('audio' , None)
        sampling_rate = kwargs.pop('sampling_rate' , None)
        text = kwargs.pop('text' , None)
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if text is not None:
            encodings = self.tokenizer(text , **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs['labels'] = encodings['input_ids']
return inputs
    def pad( self , *args , **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs)
        input_features = kwargs.pop('input_features' , None)
        labels = kwargs.pop('labels' , None)
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
            input_features['labels'] = labels['input_ids']
return input_features
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
@contextmanager
    def as_target_processor( self):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
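# Usage sketch (added; the checkpoint name is an assumption, not from this file):
#   processor = lowercase.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_waveform, sampling_rate=16_000, return_tensors="pt")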
| 471 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowercase ( ):
"""simple docstring"""
UpperCamelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
UpperCamelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
UpperCamelCase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , """func""" ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
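# Example invocation (added note): installed as the `diffusers-cli` console script,
# `diffusers-cli env` dispatches to EnvironmentCommand and prints environment info.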
| 386 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__magic_name__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertIsInstance(__magic_name__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__magic_name__ ) , 0 )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = AutoConfig.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
# Check that tokenizer_type ≠ model_type
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , config=__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__magic_name__ , """vocab.txt""" ) )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , tokenizer_type="""bert""" , use_fast=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__magic_name__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__magic_name__ , """merges.txt""" ) )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , tokenizer_type="""gpt2""" , use_fast=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__magic_name__ , """vocab.txt""" ) )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , tokenizer_type="""bert""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__magic_name__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__magic_name__ , """merges.txt""" ) )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , tokenizer_type="""gpt2""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
with pytest.raises(__magic_name__ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(__magic_name__ , __magic_name__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __magic_name__ )
else:
self.assertEqual(tokenizer.do_lower_case , __magic_name__ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__magic_name__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
UpperCamelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = TOKENIZER_MAPPING.values()
UpperCamelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__magic_name__ ) , __magic_name__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __magic_name__ )
@require_tokenizers
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__magic_name__ )
UpperCamelCase = """Hello, world. How are you?"""
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertEqual("""[UNK]""" , tokens[0] )
UpperCamelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__magic_name__ )
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__magic_name__ ) , __magic_name__ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = get_tokenizer_config("""bert-base-cased""" )
UpperCamelCase = config.pop("""_commit_hash""" , __magic_name__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__magic_name__ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase = get_tokenizer_config(__magic_name__ )
self.assertDictEqual(__magic_name__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = get_tokenizer_config(__magic_name__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
UpperCamelCase = CustomTokenizer.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
# Can register in two steps
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__magic_name__ , slow_tokenizer_class=__magic_name__ , fast_tokenizer_class=__magic_name__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
            # We pass through a BERT tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = BertTokenizerFast.from_pretrained(__magic_name__ )
bert_tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , use_fast=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__magic_name__ )
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
class UpperCAmelCase ( __snake_case ):
lowercase = False
class UpperCAmelCase ( __snake_case ):
lowercase = NewTokenizer
lowercase = False
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoTokenizer.register(__magic_name__ , fast_tokenizer_class=__magic_name__ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
__magic_name__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
__magic_name__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
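# Note (added): the final test asserts that a repeated from_pretrained call is served
# from the local cache, issuing a single HEAD request (and no GET) to check for updates.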
| 386 | 1 |
"""simple docstring"""
def euclidean_gcd( a , b ):
    '''Iterative Euclidean algorithm for the greatest common divisor.'''
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive( a , b ):
    '''Recursive Euclidean algorithm for the greatest common divisor.'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main():
'''simple docstring'''
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
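    # Added self-checks (illustrative, not in the original file):
    assert euclidean_gcd(3 , 5 ) == 1
    assert euclidean_gcd_recursive(3 , 6 ) == 3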
| 194 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowercase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = StableDiffusionControlNetImgaImgPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=A ,set_alpha_to_one=A ,)
torch.manual_seed(0 )
UpperCAmelCase__ : Any = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase__ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
UpperCAmelCase__ : List[str] = CLIPTextModel(A )
UpperCAmelCase__ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase__ : Optional[int] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowercase ( self : Union[str, Any] ,A : Dict ,A : Optional[Any]=0 ):
'''simple docstring'''
if str(A ).startswith("""mps""" ):
UpperCAmelCase__ : List[str] = torch.manual_seed(A )
else:
UpperCAmelCase__ : List[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : Any = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=A ,device=torch.device(A ) ,)
UpperCAmelCase__ : str = floats_tensor(control_image.shape ,rng=random.Random(A ) ).to(A )
UpperCAmelCase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 )[0]
UpperCAmelCase__ : List[str] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((64, 64) )
UpperCAmelCase__ : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __lowercase ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = StableDiffusionControlNetImgaImgPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowercase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m ,torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlneta = ControlNetModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
        controlneta.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnetb = ControlNetModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
        controlnetb.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
UpperCAmelCase__ : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=A ,set_alpha_to_one=A ,)
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
UpperCAmelCase__ : Optional[Any] = CLIPTextModel(A )
UpperCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlneta, controlnetb] )
UpperCAmelCase__ : Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowercase ( self : Optional[int] ,A : List[Any] ,A : List[Any]=0 ):
'''simple docstring'''
if str(A ).startswith("""mps""" ):
UpperCAmelCase__ : List[str] = torch.manual_seed(A )
else:
UpperCAmelCase__ : int = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : List[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=A ,device=torch.device(A ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=A ,device=torch.device(A ) ,),
]
UpperCAmelCase__ : List[Any] = floats_tensor(control_image[0].shape ,rng=random.Random(A ) ).to(A )
UpperCAmelCase__ : str = image.cpu().permute(0 ,2 ,3 ,1 )[0]
UpperCAmelCase__ : Union[str, Any] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((64, 64) )
UpperCAmelCase__ : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = self.pipeline_class(**A )
pipe.to(A )
UpperCAmelCase__ : Any = 1_0.0
UpperCAmelCase__ : Any = 4
UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(A )
UpperCAmelCase__ : Optional[Any] = steps
UpperCAmelCase__ : List[str] = scale
UpperCAmelCase__ : Optional[Any] = pipe(**A )[0]
UpperCAmelCase__ : Any = self.get_dummy_inputs(A )
UpperCAmelCase__ : Optional[int] = steps
UpperCAmelCase__ : Tuple = scale
UpperCAmelCase__ : List[Any] = pipe(**A ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
UpperCAmelCase__ : List[str] = self.get_dummy_inputs(A )
UpperCAmelCase__ : Union[str, Any] = steps
UpperCAmelCase__ : Any = scale
UpperCAmelCase__ : str = pipe(**A ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
UpperCAmelCase__ : Any = self.get_dummy_inputs(A )
UpperCAmelCase__ : List[str] = steps
UpperCAmelCase__ : Union[str, Any] = scale
UpperCAmelCase__ : Union[str, Any] = pipe(**A ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def __lowercase ( self : Any ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_dummy_components()
UpperCAmelCase__ : str = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" ,safety_checker=None ,controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        prompt = """evil space-punk bird"""
        control_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
        image = load_image(
            """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
        output = pipe(
            prompt ,image ,control_image=control_image ,generator=generator ,output_type="""np""" ,num_inference_steps=50 ,strength=0.6 ,)
        image = output.images[0]
assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9e-2
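# Note (added): the slow test above runs a 512x512 canny-conditioned img2img generation
# and requires a maximum absolute error below 9e-2 against a stored reference image.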
| 194 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class __lowercase :
"""simple docstring"""
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        pass
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pass
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        pass
    def assert_almost_equals( self , a , b , tol ):
        """simple docstring"""
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_a = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_b = after_output[0]
            max_diff = np.amax(np.abs(out_b - out_a ) )
            self.assertLessEqual(max_diff , 1E-3 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        """simple docstring"""
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2 )
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def test_model_from_pretrained_configs( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def test_save_load( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def test_vision_text_output_attention( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        """simple docstring"""
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config' )
        text_config = config_inputs_dict.pop('text_config' )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
@slow
    def test_real_model_save_load_from_pretrained( self ):
        """simple docstring"""
        model_a , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs )
        out_a = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname )
            model_b = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_b(**inputs )
            out_b = after_outputs[0]
            max_diff = np.amax(np.abs(out_b - out_a ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1_3
SCREAMING_SNAKE_CASE_ : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = FlaxViTModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ : str = vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : List[Any] = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : List[Any] = 1_3
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = FlaxCLIPVisionModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : int = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE_ : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
SCREAMING_SNAKE_CASE_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE_ : str = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE_ : Tuple = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
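# --- Illustrative addition (not from the original test file) ---
# The assertions above all reduce to a max-absolute-difference check between
# framework outputs. A minimal, self-contained sketch of that comparison,
# using plain numpy arrays as stand-ins for the PyTorch/Flax model outputs:
import numpy as np

def assert_almost_equals(a, b, tol=4e-2):
    # Same criterion the tests use: largest elementwise deviation under tol.
    diff = np.amax(np.abs(a - b))
    assert diff <= tol, f"max abs diff {diff} exceeds tolerance {tol}"

assert_almost_equals(np.array([1.0, 2.0]), np.array([1.0, 2.03]))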
| 101 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's command-line options."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """Spawn one training process per TPU core."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
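# --- Illustrative addition (not part of the original launcher) ---
# The launcher's core trick is importing the training script as a module and
# rewriting sys.argv before handing over control. A self-contained sketch of
# that pattern; the stub script below is purely hypothetical:
import importlib
import sys
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    stub = Path(tmp) / "train_stub.py"
    stub.write_text("import sys\ndef _mp_fn(index):\n    print(index, sys.argv)\n")
    sys.path.append(str(stub.parent.resolve()))
    stub_mod = importlib.import_module(stub.stem)
    sys.argv = [str(stub), "--tpu_num_cores", "1"]  # what the launcher injects
    stub_mod._mp_fn(0)  # xmp.spawn would invoke this once per TPU core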
| 511 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase = logging.get_logger(__name__)
__lowercase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCAmelCase (__UpperCamelCase : str ):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCamelCase =model_type_to_module_name(__UpperCamelCase )
__UpperCamelCase =importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(__UpperCamelCase , __UpperCamelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__UpperCamelCase , '''__name__''' , __UpperCamelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCamelCase =importlib.import_module('''transformers''' )
if hasattr(__UpperCamelCase , __UpperCamelCase ):
return getattr(__UpperCamelCase , __UpperCamelCase )
return None
def lowerCAmelCase (__UpperCamelCase : Union[str, os.PathLike] , __UpperCamelCase : Optional[Union[str, os.PathLike]] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[Dict[str, str]] = None , __UpperCamelCase : Optional[Union[bool, str]] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : bool = False , **__UpperCamelCase : int , ):
"""simple docstring"""
__UpperCamelCase =get_file_from_repo(
__UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(__UpperCamelCase , encoding='''utf-8''' ) as reader:
return json.load(__UpperCamelCase )
class _lowercase :
"""simple docstring"""
def __init__( self : Optional[int] ) -> Any:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase__ )
def UpperCAmelCase_ ( cls : Optional[int] , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
__UpperCamelCase =kwargs.pop('''config''' , UpperCamelCase__ )
__UpperCamelCase =kwargs.pop('''trust_remote_code''' , UpperCamelCase__ )
__UpperCamelCase =True
__UpperCamelCase , __UpperCamelCase =ImageProcessingMixin.get_image_processor_dict(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =config_dict.get('''image_processor_type''' , UpperCamelCase__ )
__UpperCamelCase =None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
__UpperCamelCase =config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__UpperCamelCase =config_dict.pop('''feature_extractor_type''' , UpperCamelCase__ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
__UpperCamelCase =feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
__UpperCamelCase =config_dict['''auto_map''']['''AutoFeatureExtractor''']
__UpperCamelCase =feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCamelCase =AutoConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# It could be in `config.image_processor_type``
__UpperCamelCase =getattr(UpperCamelCase__ , '''image_processor_type''' , UpperCamelCase__ )
if hasattr(UpperCamelCase__ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
__UpperCamelCase =config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
__UpperCamelCase =image_processor_class_from_name(UpperCamelCase__ )
__UpperCamelCase =image_processor_auto_map is not None
__UpperCamelCase =image_processor_class is not None or type(UpperCamelCase__ ) in IMAGE_PROCESSOR_MAPPING
__UpperCamelCase =resolve_trust_remote_code(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if has_remote_code and trust_remote_code:
__UpperCamelCase =get_class_from_dynamic_module(
UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =kwargs.pop('''code_revision''' , UpperCamelCase__ )
if os.path.isdir(UpperCamelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase__ ) in IMAGE_PROCESSOR_MAPPING:
__UpperCamelCase =IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase__ )]
return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
raise ValueError(
f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase__ , UpperCamelCase__ )
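# --- Illustrative addition (not from the original module) ---
# The from_pretrained resolution above ultimately reduces to a name lookup in
# an ordered model-type -> class-name registry, with a descriptive error when
# the key is unknown. The lookup in miniature (toy registry, no transformers
# imports needed):
from collections import OrderedDict

_TOY_REGISTRY = OrderedDict([("vit", "ViTImageProcessor"), ("clip", "CLIPImageProcessor")])

def lookup_processor_class(model_type):
    if model_type not in _TOY_REGISTRY:
        raise ValueError(
            f"Unrecognized model type {model_type!r}. Known types: {', '.join(_TOY_REGISTRY)}")
    return _TOY_REGISTRY[model_type]

assert lookup_processor_class("vit") == "ViTImageProcessor"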
| 712 |
"""simple docstring"""
class SubArray:
    """Maximum subarray sum (Kadane's algorithm) over a comma-separated input string."""

    def __init__(self, arr):
        self.array = arr.split(',')

    def solve_sub_array(self):
        sum_value = [int(self.array[0])] * len(self.array)  # best sum ending at i
        rear = [int(self.array[0])] * len(self.array)       # best sum seen so far
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
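# --- Illustrative addition ---
# SubArray.solve_sub_array is Kadane's algorithm: sum_value[i] is the best
# subarray sum ending at index i, rear[i] the best seen anywhere so far. The
# same recurrence in O(1) extra space:
def max_subarray_sum(nums):
    best_here = best_overall = nums[0]
    for value in nums[1:]:
        best_here = max(best_here + value, value)   # extend or restart here
        best_overall = max(best_overall, best_here)
    return best_overall

assert max_subarray_sum([1, -2, 4, -1, 2]) == 5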
| 296 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
'''simple docstring'''
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(_a , _a )
_UpperCAmelCase = self.image_processor
def __call__( self : Any , lowerCamelCase : Union[str, Any] = None , lowerCamelCase : int = None , lowerCamelCase : Optional[int] = True , lowerCamelCase : Union[str, Any] = False , lowerCamelCase : List[str] = None , lowerCamelCase : List[Any] = None , lowerCamelCase : Any = 0 , lowerCamelCase : List[str] = None , lowerCamelCase : Tuple = None , lowerCamelCase : Dict = False , lowerCamelCase : List[Any] = False , lowerCamelCase : Any = False , lowerCamelCase : str = False , lowerCamelCase : Optional[int] = False , lowerCamelCase : List[str] = True , lowerCamelCase : Tuple = None , **lowerCamelCase : Union[str, Any] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_UpperCAmelCase = self.image_processor(_a , return_tensors=_a )
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def lowerCamelCase ( self : Any , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_a , **_a )
def lowerCamelCase ( self : int , *lowerCamelCase : Dict , **lowerCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*_a , **_a )
@property
def lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 108 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''efficientnet'''
def __init__( self , _a = 3 , _a = 600 , _a = 2.0 , _a = 3.1 , _a = 8 , _a = [3, 3, 5, 3, 5, 5, 3] , _a = [32, 16, 24, 40, 80, 112, 192] , _a = [16, 24, 40, 80, 112, 192, 320] , _a = [] , _a = [1, 2, 2, 2, 1, 2, 1] , _a = [1, 2, 2, 3, 3, 4, 1] , _a = [1, 6, 6, 6, 6, 6, 6] , _a = 0.2_5 , _a = "swish" , _a = 2560 , _a = "mean" , _a = 0.0_2 , _a = 0.0_0_1 , _a = 0.9_9 , _a = 0.5 , _a = 0.2 , **_a , ) -> List[str]:
super().__init__(**_a )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = width_coefficient
lowerCAmelCase_ = depth_coefficient
lowerCAmelCase_ = depth_divisor
lowerCAmelCase_ = kernel_sizes
lowerCAmelCase_ = in_channels
lowerCAmelCase_ = out_channels
lowerCAmelCase_ = depthwise_padding
lowerCAmelCase_ = strides
lowerCAmelCase_ = num_block_repeats
lowerCAmelCase_ = expand_ratios
lowerCAmelCase_ = squeeze_expansion_ratio
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dim
lowerCAmelCase_ = pooling_type
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = batch_norm_eps
lowerCAmelCase_ = batch_norm_momentum
lowerCAmelCase_ = dropout_rate
lowerCAmelCase_ = drop_connect_rate
lowerCAmelCase_ = sum(_a ) * 4
class __magic_name__ (__lowercase ):
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __a ( self ) -> float:
return 1E-5
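# --- Illustrative addition (assumed behaviour, mirroring the usual
# EfficientNet reference implementation rather than this config class) ---
# width_coefficient and depth_divisor drive compound scaling: channel counts
# are multiplied by the coefficient, then rounded to a multiple of the divisor
# without shrinking by more than 10%:
def scale_channels(channels, width_coefficient, divisor=8):
    scaled = channels * width_coefficient
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:  # never round down by more than 10%
        rounded += divisor
    return int(rounded)

assert scale_channels(32, 2.0) == 64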
| 122 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response: str = f'Your date {date_input}, is a {days[str(f)]}!'
    return response
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    args = parser.parse_args()
    zeller(args.date_input)
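# --- Illustrative addition ---
# The arithmetic above is Zeller's congruence. A compact standalone form for
# the Gregorian calendar, where h == 0 means Saturday, cross-checked against
# datetime (datetime's Monday == 0 maps to Zeller's h == 2):
import datetime as _dt

def zeller_h(month, day, year):
    if month <= 2:  # January/February count as months 13/14 of the prior year
        month, year = month + 12, year - 1
    k, j = year % 100, year // 100
    return (day + 13 * (month + 1) // 5 + k + k // 4 + j // 4 + 5 * j) % 7

assert zeller_h(6, 24, 2023) == (_dt.date(2023, 6, 24).weekday() + 2) % 7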
| 701 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    # Validation: every node must be a TreeNode whose data converts to float.
    def is_valid_tree(node) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)
    if not is_valid_tree(node):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')
    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )
    return is_binary_search_tree_recursive_check(node, -float('inf'), float('inf'))
if __name__ == "__main__":
import doctest
doctest.testmod()
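# --- Illustrative addition: a quick usage check for the validator above
# (values chosen arbitrarily) ---
valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(3.0))  # left child violates the BST property
assert is_binary_search_tree(valid)
assert not is_binary_search_tree(invalid)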
| 335 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        AcceleratorState._reset_state()
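# --- Illustrative addition (no accelerate required) ---
# The pickling round trip the test relies on, isolated to a plain torch
# optimizer; accelerate's prepared optimizer wraps one of these:
import pickle
import torch

_opt = torch.optim.SGD(torch.nn.Linear(10, 10).parameters(), lr=0.1)
_restored = pickle.loads(pickle.dumps(_opt))
assert isinstance(_restored, torch.optim.SGD)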
| 274 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
__A : str = field(
metadata={'help': 'The csv file to plot.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__A : bool = field(
default=__a , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__A : Optional[str] = field(
default=__a , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__A : Optional[List[str]] = list_field(
default=__a , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False
def can_convert_to_float(value):
    try:
        float(value)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline='''''' ) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['''model''']
                self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
                if can_convert_to_int(row['''result'''] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size''']), int(row['''sequence_length''']))
                    ] = int(row['''result'''] )
                elif can_convert_to_float(row['''result'''] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size''']), int(row['''sequence_length''']))
                    ] = float(row['''result'''] )
    def plot(self):
        fig, ax = plt.subplots()
        title_str = '''Time usage''' if self.args.is_time else '''Memory usage'''
        title_str = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('''log''' )
            ax.set_yscale('''log''' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
            results = self.result_dict[model_name]['''result''']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , '''--''' )
            title_str += F''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = '''Time in s''' if self.args.is_time else '''Memory in MB'''
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
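# --- Illustrative addition ---
# The aggregation step in Plot.__init__, isolated with an in-memory CSV
# (column names follow the script above):
import io as _io

_raw = "model,batch_size,sequence_length,result\nbert,8,128,0.42\nbert,8,256,0.81\n"
_agg = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
for _row in csv.DictReader(_io.StringIO(_raw)):
    _entry = _agg[_row["model"]]
    _entry["bsz"].append(int(_row["batch_size"]))
    _entry["seq_len"].append(int(_row["sequence_length"]))
    _entry["result"][(int(_row["batch_size"]), int(_row["sequence_length"]))] = float(_row["result"])

assert _agg["bert"]["result"][(8, 128)] == 0.42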
| 274 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
snake_case_ = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
snake_case_ = """▁"""
class a__ ( _lowercase ):
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__(self : int, __UpperCAmelCase : Any, __UpperCAmelCase : List[str]="<s>", __UpperCAmelCase : int="</s>", __UpperCAmelCase : Optional[Any]="</s>", __UpperCAmelCase : int="<s>", __UpperCAmelCase : str="<unk>", __UpperCAmelCase : str="<pad>", __UpperCAmelCase : Dict="<mask>", __UpperCAmelCase : Optional[Dict[str, Any]] = None, **__UpperCAmelCase : int, ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(__UpperCAmelCase, lstrip=__UpperCAmelCase, rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase, __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase, eos_token=__UpperCAmelCase, unk_token=__UpperCAmelCase, sep_token=__UpperCAmelCase, cls_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, mask_token=__UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **__UpperCAmelCase, )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
SCREAMING_SNAKE_CASE : int = len(self.sp_model ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase__ (self : List[str], __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ (self : List[str], __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None, __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase, token_ids_a=__UpperCAmelCase, already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowercase__ (self : Tuple, __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ (self : Tuple ) -> Dict:
"""simple docstring"""
return len(self.sp_model )
def lowercase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ (self : List[str], __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(__UpperCAmelCase, out_type=__UpperCAmelCase )
def lowercase__ (self : int, __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.PieceToId(__UpperCAmelCase )
return spm_id if spm_id else self.unk_token_id
def lowercase__ (self : Optional[Any], __UpperCAmelCase : str ) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__UpperCAmelCase )
def lowercase__ (self : Any, __UpperCAmelCase : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : str = ''''''
SCREAMING_SNAKE_CASE : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : int = []
else:
current_sub_tokens.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def __getstate__(self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : List[str] = None
return state
def __setstate__(self : Tuple, __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : str, __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
__UpperCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase, '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
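# --- Illustrative addition (no sentencepiece required) ---
# build_inputs_with_special_tokens above produces RoBERTa-style sequences:
# <s> A </s> for one input and <s> A </s></s> B </s> for a pair. The pure-list
# pattern, using the special-token ids hard-coded above (<s>=0, </s>=2):
_CLS, _SEP = 0, 2

def _build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [_CLS] + ids_a + [_SEP]
    return [_CLS] + ids_a + [_SEP, _SEP] + ids_b + [_SEP]

assert _build_inputs([5, 6]) == [0, 5, 6, 2]
assert _build_inputs([5], [7]) == [0, 5, 2, 2, 7, 2]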
| 355 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Optional[Any], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : int, **__UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : List[str], *__UpperCAmelCase : str, **__UpperCAmelCase : List[Any] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[str] = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Union[str, Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : int, **__UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : str, *__UpperCAmelCase : str, **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[str] = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : List[str], **__UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Tuple, **__UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : str, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : int, **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Any, *__UpperCAmelCase : str, **__UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Any, **__UpperCAmelCase : str ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Dict, **__UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : List[str], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Tuple, **__UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : str ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Any, **__UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
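# --- Illustrative addition ---
# Every class above is the same guard: construction fails unless the backend
# is importable. The mechanism in miniature (ensure_backends is a hypothetical
# stand-in for transformers' requires_backends):
import importlib.util

def ensure_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires: {', '.join(missing)}")

class _DummySketch:
    def __init__(self):
        ensure_backends(self, ["sentencepiece"])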
| 355 | 1 |
import datasets
a__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
a__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
a__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
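# --- Illustrative addition (standalone, no transformers required) ---
# The tokenizer under test is character-level. The encode/decode round trip
# the tests exercise, reduced to a toy vocabulary:
_chars = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
_vocab = {tok: i for i, tok in enumerate(_chars)}
_inv = {i: tok for tok, i in _vocab.items()}

def _encode(text):
    return [_vocab[ch] for ch in text]

def _decode(ids):
    return "".join(_inv[i] for i in ids)

assert _decode(_encode("tester")) == "tester"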
| 14 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _snake_case (__SCREAMING_SNAKE_CASE):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case ,"width_multiplier" ) )
class _snake_case :
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=64 ,_snake_case=2 ,_snake_case=3 ,_snake_case="swish" ,_snake_case=3 ,_snake_case=32 ,_snake_case=0.1 ,_snake_case=0.02 ,_snake_case=True ,_snake_case=True ,_snake_case=10 ,_snake_case=None ,_snake_case=0.25 ,_snake_case=0.0 ,_snake_case=0.0 ,):
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : Tuple = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = make_divisible(5_12 * width_multiplier ,divisor=8 )
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = conv_kernel_size
UpperCAmelCase_ : Optional[int] = output_stride
UpperCAmelCase_ : str = classifier_dropout_prob
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[str] = num_labels
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : Optional[int] = width_multiplier
UpperCAmelCase_ : List[Any] = ffn_dropout
UpperCAmelCase_ : Tuple = attn_dropout
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
UpperCAmelCase_ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self ):
return MobileViTVaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Any = MobileViTVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileViTVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Dict = MobileViTVaForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Any = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
UpperCAmelCase_ : Tuple = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__A : List[str] =(
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__A : Dict =False
__A : Optional[int] =False
__A : int =False
__A : Optional[int] =False
    def setUp( self ):
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self ,config_class=MobileViTVaConfig ,has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason="MobileViTV2 does not output attentions" )
    def test_attention_outputs( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) ,expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest (unittest.TestCase):
@cached_property
    def default_image_processor( self ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
@slow
    def test_inference_semantic_segmentation( self ):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape ,expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] ,device=torch_device ,)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,expected_slice ,atol=1E-4 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs ,target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape ,expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape ,expected_shape )
| 710 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
    operations = {"+", "-", "*", "/"}
    stack : list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
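# Illustrative check (added; not part of the original module): "2 1 + 3 *"
# is postfix notation for (2 + 1) * 3, so the evaluator above should give
# evaluate_postfix(["2", "1", "+", "3", "*"]) == 9.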
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 0 |
'''simple docstring'''
def counting_sort( collection ):
    """simple docstring"""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
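# Worked example (added for clarity): counting_sort([4, 2, 2, 8]) counts the
# values over the range 2..8, prefix-sums the counts to get each value's
# final position, and returns the stable result [2, 2, 4, 8].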
def counting_sort_string( string ):
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 533 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = KandinskyVaaPipeline
    params = [
        """image_embeds""",
        """negative_image_embeds""",
    ]
    batch_params = ["""image_embeds""", """negative_image_embeds"""]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = """red cat, 4k photo"""
        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 533 | 1 |
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main():
    message = input('''Enter message: ''' )
    key = input('''Enter key [alphanumeric]: ''' )
    mode = input('''Encrypt/Decrypt [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        mode = '''encrypt'''
        translated = encrypt_message(key , message )
    elif mode.lower().startswith('''d''' ):
        mode = '''decrypt'''
        translated = decrypt_message(key , message )
    print(f"\n{mode.title()}ed message:" )
    print(translated )
def encrypt_message( key , message ):
    return translate_message(key , message , '''encrypt''' )
def decrypt_message( key , message ):
    return translate_message(key , message , '''decrypt''' )
def translate_message( key , message , mode ):
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 469 | from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _A ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 768 , ) -> Tuple:
'''simple docstring'''
super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE_ ) )
        self.std = nn.Parameter(torch.ones(1 , SCREAMING_SNAKE_CASE_ ) )
    def to(self , torch_device = None , torch_dtype = None , ):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale(self , embeds ):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale(self , embeds ):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
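# Minimal usage sketch (added; values illustrative): the class standardizes
# embeddings with learned mean/std parameters and can invert the transform.
# normalizer = _A(768)
# scaled = normalizer.scale(embeds)      # (embeds - mean) / std
# restored = normalizer.unscale(scaled)  # recovers the original embeds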
| 469 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
    def __init__( self , *args , **kwargs):
        super().__init__(*args , **kwargs)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters( self , top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs):
        return super().__call__(images , **kwargs)
    def preprocess( self , image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs
    def _forward( self , model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs , top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores , ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 171 |
from collections import deque
class Process :
    def __init__( self , process_name: str , arrival_time: int , burst_time: int):
        self.process_name = process_name # process name
        self.arrival_time = arrival_time # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time # remaining burst time
        self.waiting_time = 0 # total time of the process wait in ready queue
        self.turnaround_time = 0 # time from arrival time to completion time
class MLFQ :
    def __init__( self , number_of_queues: int , time_slices: list[int] , queue: deque[Process] , current_time: int , ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue( self ):
        sequence = []
        for i in range(len(self.finish_queue )):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue: list[Process] ):
        waiting_times = []
        for i in range(len(queue )):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue: list[Process] ):
        turnaround_times = []
        for i in range(len(queue )):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue: list[Process] ):
        completion_times = []
        for i in range(len(queue )):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue: deque[Process] ):
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process: Process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue: deque[Process] ):
        finished: deque[Process] = deque() # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft() # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished ) # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue: deque[Process] , time_slice: int ):
        finished: deque[Process] = deque() # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue )):
            cp = ready_queue.popleft() # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished ) # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished , self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
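# Worked trace (added; uses the demo values below): with time_slices [17, 25],
# P1 (burst 53) runs 17 units in queue 0 and 25 in queue 1, then finishes its
# last 11 units under FCFS; P2 (burst 17) already completes within the first
# round-robin pass.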
if __name__ == "__main__":
import doctest
    P1 = Process('''P1''', 0, 53)
    P2 = Process('''P2''', 0, 17)
    P3 = Process('''P3''', 0, 68)
    P4 = Process('''P4''', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([P1, P2, P3, P4])})
    P1 = Process('''P1''', 0, 53)
    P2 = Process('''P2''', 0, 17)
    P3 = Process('''P3''', 0, 68)
    P4 = Process('''P4''', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F'''waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F'''completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F'''turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print sequence of finished processes
    print(
        F'''sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}'''
    )
| 171 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def create_rename_keys( encoder_config , decoder_config ):
    """simple docstring"""
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , encoder_config ):
    """simple docstring"""
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( checkpoint_url ):
    """simple docstring"""
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    encoder_config = ViTConfig(image_size=3_8_4 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_0_2_4
        encoder_config.intermediate_size = 4_0_9_6
        encoder_config.num_hidden_layers = 2_4
        encoder_config.num_attention_heads = 1_6
        decoder_config.encoder_hidden_size = 1_0_2_4
    else:
        raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = 'relu'
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
model.eval()
# load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , check_hash=True )['''model''']
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            state_dict['''decoder.model.''' + key] = val
        else:
            state_dict[key] = val
# load state dict
    model.load_state_dict(state_dict )
# Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''' )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving processor to {pytorch_dump_folder_path}""" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 703 | '''simple docstring'''
def lowerCAmelCase ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
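# Worked example (added): for number = 5 the loop computes
# 1*2//2 = 1, 1*6//3 = 2, 2*10//4 = 5, 5*14//5 = 14, i.e. the Catalan
# sequence 1, 1, 2, 5, 14, ... evaluated term by term.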
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 0 |
import argparse
import os
import re
import packaging.version
lowercase_ = """examples/"""
lowercase_ = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowercase_ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowercase_ = """README.md"""
def update_version_in_file( fname , version , pattern ):
    """simple docstring"""
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
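# Example (added; filename and version are illustrative):
# update_version_in_file("setup.py", "4.30.0", "setup") rewrites the
# `version="...",` line of setup.py to version="4.30.0" via the regex above.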
def update_version_in_examples( version ):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update( version , patch=False ):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list( ):
    """simple docstring"""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version( ):
    """simple docstring"""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def post_release_work( ):
    """simple docstring"""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 74 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
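    # Note (added): _LazyModule defers importing TapexTokenizer until the
    # attribute is first accessed, keeping the top-level package import cheap.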
| 662 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
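# Example invocation (added; the script name and paths are placeholders):
# python convert_gpt2_checkpoint.py --gpt2_checkpoint_path ./model.ckpt \
#     --pytorch_dump_folder_path ./out --gpt2_config_file ./config.json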
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 542 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowercase : Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class __UpperCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
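# Hypothetical usage sketch (added): the fast tokenizer above can be loaded
# from the Hub and used to encode text, e.g.
# tok = __UpperCAmelCase.from_pretrained("facebook/blenderbot_small-90M")
# tok("hello world").input_ids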
| 542 | 1 |