Dataset schema (column name, dtype, observed range):

    code                     string   length 81 to 54k
    code_codestyle           int64    0 to 721
    style_context            string   length 91 to 41.9k
    style_context_codestyle  int64    0 to 699
    label                    int64    0 or 1
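For orientation, a minimal sketch of loading and inspecting one row with this schema via the Hugging Face datasets library; the dataset identifier below is a hypothetical placeholder, not the real one.

# A minimal sketch, assuming the rows follow the schema above; the dataset
# identifier "user/code-style-pairs" is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]))                # code: string, 81 to 54k characters
print(row["code_codestyle"])           # int64 in [0, 721]
print(len(row["style_context"]))       # style_context: string, 91 to 41.9k characters
print(row["style_context_codestyle"])  # int64 in [0, 699]
print(row["label"])                    # binary label, 0 or 1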
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate lowerCamelCase :Optional[int] = trt.Logger(trt.Logger.WARNING) lowerCamelCase :Dict = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) lowerCamelCase :Tuple = logging.getLogger(__name__) lowerCamelCase :Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=3_8_4, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=1_2_8, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=2_0, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=3_0, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) lowerCamelCase :Optional[Any] = parser.parse_args() if args.tokenizer_name: lowerCamelCase :Dict = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) lowerCamelCase :Dict = args.per_device_eval_batch_size lowerCamelCase :List[str] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties lowerCamelCase :Dict = True lowerCamelCase :str = '''temp_engine/bert-fp32.engine''' if args.fpaa: lowerCamelCase :str = '''temp_engine/bert-fp16.engine''' if args.inta: lowerCamelCase :int = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') lowerCamelCase :List[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network lowerCamelCase :Dict = [network.get_input(i) for i in range(network.num_inputs)] lowerCamelCase :List[str] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: lowerCamelCase :List[str] = 1 << 5_0 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) lowerCamelCase :int = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) lowerCamelCase :Optional[Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa 
) A_ : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase__ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase__ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase__ ) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase__ ) for d_inp in d_inputs] + [int(lowerCamelCase__ ), int(lowerCamelCase__ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Synchronize the stream and take time stream.synchronize() # end time A_ : List[Any] = time.time() A_ : Optional[Any] = end_time - start_time A_ : Optional[Any] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. lowerCamelCase :Dict = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCamelCase :Dict = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. lowerCamelCase :Optional[int] = raw_datasets['''validation'''].column_names lowerCamelCase :Tuple = '''question''' if '''question''' in column_names else column_names[0] lowerCamelCase :Any = '''context''' if '''context''' in column_names else column_names[1] lowerCamelCase :Optional[int] = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
lowerCamelCase :List[str] = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) lowerCamelCase :Tuple = min(args.max_seq_length, tokenizer.model_max_length) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Tuple = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. A_ : Optional[Any] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase__ , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : str = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Any = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Optional[int] = tokenized_examples.sequence_ids(lowerCamelCase__ ) A_ : int = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Tuple = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
A_ : Union[str, Any] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples lowerCamelCase :Optional[int] = raw_datasets['''validation'''] # Validation Feature Creation lowerCamelCase :str = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) lowerCamelCase :Optional[Any] = default_data_collator lowerCamelCase :Union[str, Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) lowerCamelCase :Optional[int] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="eval" ): '''simple docstring''' A_ : Any = postprocess_qa_predictions( examples=lowerCamelCase__ , features=lowerCamelCase__ , predictions=lowerCamelCase__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase__ , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Any = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase__ , label_ids=lowerCamelCase__ ) lowerCamelCase :Optional[int] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def a ( lowerCamelCase__ ): '''simple docstring''' return trt.volume(engine.get_binding_shape(lowerCamelCase__ ) ) * engine.get_binding_dtype(lowerCamelCase__ ).itemsize # Allocate device memory for inputs and outputs. lowerCamelCase :List[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer lowerCamelCase :Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) lowerCamelCase :str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) lowerCamelCase :str = cuda.mem_alloc(h_outputa.nbytes) lowerCamelCase :Optional[int] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
lowerCamelCase :Optional[Any] = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(F" Num examples = {len(eval_dataset)}") logger.info(F" Batch size = {args.per_device_eval_batch_size}") lowerCamelCase :Optional[Any] = 0.0 lowerCamelCase :int = 0 lowerCamelCase :str = timeit.default_timer() lowerCamelCase :Optional[Any] = None for step, batch in enumerate(eval_dataloader): lowerCamelCase , lowerCamelCase :Dict = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 lowerCamelCase , lowerCamelCase :str = outputs lowerCamelCase :Optional[Any] = torch.tensor(start_logits) lowerCamelCase :Optional[int] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered lowerCamelCase :Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0) lowerCamelCase :Optional[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0) lowerCamelCase :Optional[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) lowerCamelCase :Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0) if all_preds is not None: lowerCamelCase :Dict = nested_truncate(all_preds, len(eval_dataset)) lowerCamelCase :Any = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0)) logger.info('''Total Number of Inference = %d''', niter) lowerCamelCase :Union[str, Any] = post_processing_function(eval_examples, eval_dataset, all_preds) lowerCamelCase :Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F"Evaluation metrics: {eval_metric}")
686
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
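Since the file above relies on transformers' `_LazyModule`, a simplified sketch of the underlying lazy-import pattern (PEP 562 module `__getattr__`) may help; this is an illustrative stand-in, not the actual `_LazyModule` implementation.

# lazy_sketch.py - a simplified stand-in for the lazy-import pattern used by
# transformers' _LazyModule (illustration only: the real class also handles
# submodule mapping, dir(), and import errors).
import importlib

# maps providing submodule -> exported attribute names, as in _import_structure
_import_structure = {"math": ["sqrt", "pi"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # PEP 562: called for attributes not found in the module; the providing
    # submodule is imported only on first access, keeping `import` cheap.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")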
686
1
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCamelCase :Optional[int] = logging.get_logger(__name__) lowerCamelCase :Optional[Any] = '''▁''' lowerCamelCase :List[str] = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCamelCase :List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } lowerCamelCase :Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off lowerCamelCase :List[str] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask'] __SCREAMING_SNAKE_CASE : List[int] = [] __SCREAMING_SNAKE_CASE : List[int] = [] def __init__(self , lowercase , lowercase , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<pad>" , lowercase="<unk>" , lowercase="m2m100" , lowercase = None , lowercase=8 , **lowercase , ): A_ : int = {} if sp_model_kwargs is None else sp_model_kwargs A_ : Any = language_codes A_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes] A_ : int = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} A_ : int = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(lowercase ) for lang_code in 
fairseq_language_code if self.get_lang_token(lowercase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowercase , tgt_lang=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , unk_token=lowercase , pad_token=lowercase , language_codes=lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase , **lowercase , ) A_ : List[Any] = vocab_file A_ : Optional[int] = load_json(lowercase ) A_ : str = {v: k for k, v in self.encoder.items()} A_ : Optional[Any] = spm_file A_ : List[Any] = load_spm(lowercase , self.sp_model_kwargs ) A_ : int = len(self.encoder ) A_ : List[str] = { self.get_lang_token(lowercase ): self.encoder_size + i for i, lang_code in enumerate(lowercase ) } A_ : int = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase )} A_ : List[str] = {v: k for k, v in self.lang_token_to_id.items()} A_ : Dict = src_lang if src_lang is not None else """en""" A_ : Tuple = tgt_lang A_ : Optional[int] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A_ : Union[str, Any] = num_madeup_words @property def _a (self ): return len(self.encoder ) + len(self.lang_token_to_id ) @property def _a (self ): return self._src_lang @src_lang.setter def _a (self , lowercase ): A_ : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _a (self , lowercase ): return self.sp_model.encode(lowercase , out_type=lowercase ) def _a (self , lowercase ): if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(lowercase , self.encoder[self.unk_token] ) def _a (self , lowercase ): if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(lowercase , self.unk_token ) def _a (self , lowercase ): A_ : str = [] A_ : Any = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase ) + token A_ : str = [] else: current_sub_tokens.append(lowercase ) out_string += self.sp_model.decode(lowercase ) return out_string.strip() def _a (self , lowercase , lowercase = None , lowercase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) A_ : str = [1] * len(self.prefix_tokens ) A_ : Dict = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowercase )) + suffix_ones return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones def _a (self , lowercase , lowercase = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _a (self ): A_ : Union[str, Any] = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): A_ : Any = self.__dict__.copy() A_ : Any = None return state def __setstate__(self , lowercase ): A_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A_ : int = {} A_ : Dict = load_spm(self.spm_file , self.sp_model_kwargs ) def _a (self , lowercase , lowercase = None ): A_ : Optional[Any] = Path(lowercase ) if not save_dir.is_dir(): raise OSError(F'{save_directory} should 
be a directory' ) A_ : Optional[int] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A_ : List[str] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , lowercase ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowercase ) elif not os.path.isfile(self.spm_file ): with open(lowercase , """wb""" ) as fi: A_ : Any = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (str(lowercase ), str(lowercase )) def _a (self , lowercase , lowercase = "en" , lowercase = None , lowercase = "ro" , **lowercase , ): A_ : Any = src_lang A_ : Optional[Any] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , **lowercase ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A_ : Optional[int] = src_lang A_ : Optional[Any] = self(lowercase , add_special_tokens=lowercase , **lowercase ) A_ : int = self.get_lang_id(lowercase ) A_ : Dict = tgt_lang_id return inputs def _a (self ): self.set_src_lang_special_tokens(self.src_lang ) def _a (self ): self.set_tgt_lang_special_tokens(self.tgt_lang ) def _a (self , lowercase ): A_ : List[Any] = self.get_lang_token(lowercase ) A_ : Union[str, Any] = self.lang_token_to_id[lang_token] A_ : List[str] = [self.cur_lang_id] A_ : Union[str, Any] = [self.eos_token_id] def _a (self , lowercase ): A_ : List[str] = self.get_lang_token(lowercase ) A_ : List[str] = self.lang_token_to_id[lang_token] A_ : List[Any] = [self.cur_lang_id] A_ : List[str] = [self.eos_token_id] def _a (self , lowercase ): return self.lang_code_to_token[lang] def _a (self , lowercase ): A_ : Optional[Any] = self.get_lang_token(lowercase ) return self.lang_token_to_id[lang_token] def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def a ( lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """r""" ) as f: return json.load(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
686
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Each custom extension file must exist under the given package root.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
686
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : List[str] = LEDConfig __SCREAMING_SNAKE_CASE : Dict = {} __SCREAMING_SNAKE_CASE : List[str] = 'gelu' def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=4 , ): A_ : List[str] = parent A_ : Optional[int] = batch_size A_ : Any = seq_length A_ : Optional[Any] = is_training A_ : Optional[int] = use_labels A_ : Optional[int] = vocab_size A_ : Any = hidden_size A_ : Optional[Any] = num_hidden_layers A_ : int = num_attention_heads A_ : str = intermediate_size A_ : Dict = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[int] = max_position_embeddings A_ : List[Any] = eos_token_id A_ : Optional[Any] = pad_token_id A_ : Union[str, Any] = bos_token_id A_ : Optional[Any] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A_ : int = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A_ : Optional[int] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _a (self ): A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ : str = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : int = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) A_ : Any = prepare_led_inputs_dict(lowercase , lowercase , lowercase ) A_ : Optional[Any] = tf.concat( [tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , ) A_ : Optional[int] = global_attention_mask return config, inputs_dict def _a (self , lowercase , lowercase ): A_ : Any = TFLEDModel(config=lowercase ).get_decoder() A_ : str = inputs_dict["""input_ids"""] A_ : Any = input_ids[:1, :] A_ : Any = inputs_dict["""attention_mask"""][:1, :] A_ : Any = 1 # first forward pass A_ : List[Any] = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) A_, A_ : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A_ : Any = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ : List[Any] = model(lowercase , attention_mask=lowercase )[0] A_ : str = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx] A_ : Optional[int] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if attention_mask is None: A_ : Optional[int] = tf.cast(tf.math.not_equal(lowerCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A_ : int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A_ : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, 
"attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () __SCREAMING_SNAKE_CASE : str = (TFLEDForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : int = False def _a (self ): A_ : int = TFLEDModelTester(self ) A_ : str = ConfigTester(self , config_class=lowercase ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def _a (self ): A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : int = tf.zeros_like(inputs_dict["""attention_mask"""] ) A_ : Optional[Any] = 2 A_ : List[Any] = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , ) A_ : Union[str, Any] = True A_ : str = self.model_tester.seq_length A_ : List[Any] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase ): A_ : int = outputs.decoder_attentions self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase ): A_ : Any = [t.numpy() for t in outputs.encoder_attentions] A_ : int = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: A_ : List[str] = True A_ : Optional[Any] = False A_ : List[Any] = False A_ : Union[str, Any] = model_class(lowercase ) A_ : List[str] = model(self._prepare_for_class(lowercase , lowercase ) ) A_ : List[str] = len(lowercase ) self.assertEqual(config.output_hidden_states , lowercase ) check_encoder_attentions_output(lowercase ) if self.is_encoder_decoder: A_ : List[Any] = model_class(lowercase ) A_ : Tuple = model(self._prepare_for_class(lowercase , lowercase ) ) self.assertEqual(config.output_hidden_states , lowercase ) check_decoder_attentions_output(lowercase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ : str = True A_ : Optional[int] = model_class(lowercase ) A_ : Optional[Any] = model(self._prepare_for_class(lowercase , lowercase ) ) self.assertEqual(config.output_hidden_states , lowercase ) 
check_encoder_attentions_output(lowercase ) # Check attention is always last and order is fine A_ : str = True A_ : Tuple = True A_ : List[str] = model_class(lowercase ) A_ : int = model(self._prepare_for_class(lowercase , lowercase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) ) self.assertEqual(model.config.output_hidden_states , lowercase ) check_encoder_attentions_output(lowercase ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def _a (self ): pass def _a (self ): # TODO: Head-masking not yet implement pass def a ( lowerCamelCase__ ): '''simple docstring''' return tf.constant(lowerCamelCase__ , dtype=tf.intaa ) lowerCamelCase :str = 1E-4 @slow @require_tf class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here A_ : Dict = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : Tuple = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : List[Any] = prepare_led_inputs_dict(model.config , lowercase , lowercase ) A_ : Dict = model(**lowercase )[0] A_ : int = (1, 1024, 768) self.assertEqual(output.shape , lowercase ) # change to expected output here A_ : Optional[int] = tf.convert_to_tensor( [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 ) def _a (self ): A_ : Dict = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here A_ : List[str] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : List[str] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ : Dict = prepare_led_inputs_dict(model.config , lowercase , lowercase ) A_ : List[Any] = model(**lowercase )[0] A_ : Optional[Any] = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , lowercase ) # change to expected output here A_ : Any = tf.convert_to_tensor( [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 )
686
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
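A quick sanity check of the memoized recursion above; the 4-day count of 43 comes straight from the Project Euler 191 problem statement.

# Sanity check, assuming the functions above are importable from this module:
# Project Euler 191 states there are exactly 43 prize strings over a 4-day period.
assert solution(4) == 43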
686
1
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
686
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant learning-rate multiples, parsed from a rule string like "1:10,0.1:20,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine schedule with several hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the initial lr down to `lr_end`, after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
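A brief usage sketch of the `get_scheduler` helper above; the one-parameter model and the step counts are illustrative assumptions, not values from the source.

# Usage sketch, assuming the functions above live in the current module;
# the tiny throwaway parameter stands in for a real model.
import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=1e-3)
scheduler = get_scheduler(
    "cosine",                # resolved to SchedulerType.COSINE
    optimizer,
    num_warmup_steps=10,     # linear warmup from 0 to the base lr
    num_training_steps=100,  # cosine decay back toward 0 afterwards
)
for _ in range(100):
    optimizer.step()   # step the optimizer first,
    scheduler.step()   # then advance the learning-rate schedule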
686
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer lowerCamelCase :List[str] = logging.get_logger(__name__) lowerCamelCase :List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase :Any = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': 
'''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } lowerCamelCase :Dict = { '''bert-base-uncased''': 5_1_2, '''bert-large-uncased''': 5_1_2, '''bert-base-cased''': 5_1_2, '''bert-large-cased''': 5_1_2, '''bert-base-multilingual-uncased''': 5_1_2, '''bert-base-multilingual-cased''': 5_1_2, '''bert-base-chinese''': 5_1_2, '''bert-base-german-cased''': 5_1_2, '''bert-large-uncased-whole-word-masking''': 5_1_2, '''bert-large-cased-whole-word-masking''': 5_1_2, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_1_2, '''bert-large-cased-whole-word-masking-finetuned-squad''': 5_1_2, '''bert-base-cased-finetuned-mrpc''': 5_1_2, '''bert-base-german-dbmdz-cased''': 5_1_2, '''bert-base-german-dbmdz-uncased''': 5_1_2, '''TurkuNLP/bert-base-finnish-cased-v1''': 5_1_2, '''TurkuNLP/bert-base-finnish-uncased-v1''': 5_1_2, '''wietsedv/bert-base-dutch-cased''': 5_1_2, } lowerCamelCase :Tuple = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, 
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False}, } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : str = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer def __init__(self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ): super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) A_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars ): A_ : Any = getattr(lowercase , normalizer_state.pop("""type""" ) ) A_ : int = do_lower_case A_ : List[Any] = strip_accents A_ : Optional[int] = tokenize_chinese_chars A_ : Tuple = normalizer_class(**lowercase ) A_ : Union[str, Any] = do_lower_case def _a (self , lowercase , lowercase=None ): A_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a (self , lowercase , lowercase = None ): A_ : List[str] = [self.sep_token_id] A_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a (self , lowercase , lowercase = None ): A_ : Any = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
686
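A minimal sketch of the special-token layout the fast BERT tokenizer above produces, assuming the standard bert-base-uncased ids 101 ([CLS]) and 102 ([SEP]); the input ids are made-up placeholders.

# Sketch of build_inputs_with_special_tokens / create_token_type_ids_from_sequences
# from the tokenizer above. 101/102 are the usual bert-base-uncased special ids.
CLS_ID, SEP_ID = 101, 102

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    # single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
    output = [CLS_ID] + ids_a + [SEP_ID]
    if ids_b:
        output += ids_b + [SEP_ID]
    return output

def create_token_type_ids(ids_a, ids_b=None):
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert build_inputs_with_special_tokens([7, 8]) == [101, 7, 8, 102]
assert create_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]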
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''') lowerCamelCase :int = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowerCamelCase :List[str] = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowerCamelCase :Union[str, Any] = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowerCamelCase :Dict = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowerCamelCase :int = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowerCamelCase :str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowerCamelCase :List[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowerCamelCase :Tuple = [] lowerCamelCase :Dict = [] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for attribute in key.split(""".""" ): A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Any = 
hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + ("." + weight_type if weight_type is not None else "")} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Dict = value elif weight_type == "bias": A_ : Dict = value elif weight_type == "running_mean": A_ : Optional[Any] = value elif weight_type == "running_var": A_ : int = value elif weight_type == "num_batches_tracked": A_ : Optional[Any] = value elif weight_type == "weight_ih_l0": A_ : Optional[int] = value elif weight_type == "weight_hh_l0": A_ : Union[str, Any] = value elif weight_type == "bias_ih_l0": A_ : Optional[int] = value elif weight_type == "bias_hh_l0": A_ : Tuple = value elif weight_type == "weight_ih_l1": A_ : Optional[int] = value elif weight_type == "weight_hh_l1": A_ : Dict = value elif weight_type == "bias_ih_l1": A_ : Optional[int] = value elif weight_type == "bias_hh_l1": A_ : Tuple = value else: A_ : Any = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_, A_ : List[str] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = [] if model_name in ("encodec_24khz", "encodec_32khz"): A_ : List[str] = MAPPING_24K elif model_name == "encodec_48khz": A_ : str = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f'{name} was ignored' ) continue A_ : str = False for key, mapped_key in MAPPING.items(): if "*" in key: A_, A_ : List[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: A_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue A_ : Union[str, Any] = True if "*" in mapped_key: A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: A_ : Any = """weight_g""" elif "weight_v" in name: A_ : Tuple = """weight_v""" elif "weight_ih_l0" in name: A_ : Union[str, Any] = """weight_ih_l0""" elif "weight_hh_l0" in name: A_ : Tuple = """weight_hh_l0""" elif "bias_ih_l0" in name: A_ : str = """bias_ih_l0""" elif "bias_hh_l0" in name: A_ : List[Any] = """bias_hh_l0""" elif "weight_ih_l1" in name: A_ : Dict = """weight_ih_l1""" elif "weight_hh_l1" in name: A_ : Any = """weight_hh_l1""" elif "bias_ih_l1" in name: A_ : Optional[int] = """bias_ih_l1""" elif "bias_hh_l1" in name: A_ : List[Any] = """bias_hh_l1""" elif "bias" in name: A_ : List[str] = """bias""" elif "weight" in name: A_ : Optional[int] = """weight""" elif "running_mean" in name: A_ : Union[str, Any] = """running_mean""" elif "running_var" in name: A_ : Optional[int] = """running_var""" elif "num_batches_tracked" in name: A_ : List[Any] = """num_batches_tracked""" else: A_ : str = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue
if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if config_path is not None: A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Optional[int] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A_ : Dict = [8, 5, 4, 4] A_ : Optional[Any] = [2.2] A_ : Tuple = 64 A_ : Tuple = 3_20_00 A_ : List[Any] = 20_48 A_ : Optional[Any] = False A_ : str = False A_ : Optional[int] = False elif model_name == "encodec_48khz": A_ : Dict = [8, 5, 4, 2] A_ : Tuple = [3.0, 6.0, 12.0, 24.0] A_ : List[Any] = 4_80_00 A_ : Dict = 2 A_ : Dict = False A_ : Dict = """time_group_norm""" A_ : Optional[Any] = True A_ : str = 1.0 A_ : Any = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : Dict = EncodecModel(lowerCamelCase__ ) A_ : Any = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase__ ) A_ : int = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A_ : Tuple = original_checkpoint["""best_state"""] recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCamelCase :Dict = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
686
1
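The EnCodec conversion script above remaps original checkpoint keys onto Hugging Face names, with "*" standing in for a layer index. Below is a simplified, self-contained sketch of that wildcard matching; the single mapping entry and the exact matching rule are illustrative, not the full conversion logic.

# Toy wildcard remapping in the spirit of MAPPING / should_ignore above.
MAPPING = {"encoder.model.*.conv.conv": "encoder.layers.*.conv"}

def remap(name):
    for key, mapped_key in MAPPING.items():
        prefix, suffix = key.split(".*.")
        if name.startswith(prefix + ".") and name.endswith("." + suffix):
            # recover the layer index that "*" stood for
            index = name[len(prefix) + 1 : -(len(suffix) + 1)]
            return mapped_key.replace("*", index)
    return name  # keys with no wildcard match pass through unchanged

assert remap("encoder.model.7.conv.conv") == "encoder.layers.7.conv"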
'''simple docstring''' def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = 0 # if input_string is "aba" than new_input_string become "a|b|a" A_ : Any = """""" A_ : Any = """""" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(lowerCamelCase__ ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring A_, A_ : str = 0, 0 # length[i] shows the length of palindromic substring with center i A_ : List[str] = [1 for i in range(len(lowerCamelCase__ ) )] # for each character in new_string find corresponding palindromic string A_ : Dict = 0 for j in range(len(lowerCamelCase__ ) ): A_ : Optional[int] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(lowerCamelCase__ ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 A_ : List[str] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: A_ : Any = j - k + 1 # noqa: E741 A_ : int = j + k - 1 # update max_length and start position if max_length < length[j]: A_ : str = length[j] A_ : List[Any] = j # create that string A_ : Optional[Any] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
686
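A compact, runnable restatement of the Manacher expansion above (same "|" transform and the same furthest-window bookkeeping), with a small worked input:

def longest_palindrome(s: str) -> str:
    t = "|".join(s)              # "abaab" -> "a|b|a|a|b"
    n = len(t)
    length = [1] * n             # palindrome length centered at i (in t)
    left = right = 0             # furthest-reaching palindrome seen so far
    best_len = best_center = 0
    for j in range(n):
        k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1)
        while j - k >= 0 and j + k < n and t[j + k] == t[j - k]:
            k += 1
        length[j] = 2 * k - 1
        if j + k - 1 > right:    # extends past the previous furthest end
            left, right = j - k + 1, j + k - 1
        if length[j] > best_len:
            best_len, best_center = length[j], j
    core = t[best_center - best_len // 2 : best_center + best_len // 2 + 1]
    return core.replace("|", "")

assert longest_palindrome("abaab") == "baab"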
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :Any = logging.get_logger(__name__) lowerCamelCase :Any = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = 'beit' def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ): super().__init__(**lowercase ) A_ : Union[str, Any] = vocab_size A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Tuple = num_attention_heads A_ : List[Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : str = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Dict = initializer_range A_ : str = layer_norm_eps A_ : Any = image_size A_ : int = patch_size A_ : List[str] = num_channels A_ : Any = use_mask_token A_ : Dict = use_absolute_position_embeddings A_ : List[Any] = use_relative_position_bias A_ : Tuple = use_shared_relative_position_bias A_ : Optional[int] = layer_scale_init_value A_ : Tuple = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : Optional[int] = use_auxiliary_head A_ : Union[str, Any] = auxiliary_loss_weight A_ : Tuple = auxiliary_channels A_ : List[Any] = auxiliary_num_convs A_ : Dict = auxiliary_concat_input A_ : Optional[Any] = semantic_loss_ignore_index class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
686
1
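A short usage sketch for the BEiT config above; it assumes the transformers package is installed, and the attribute names follow the constructor shown (the parameter names are masked in the listing).

# Hedged usage sketch: instantiate the config and derive the patch-grid size.
from transformers import BeitConfig

config = BeitConfig(image_size=224, patch_size=16, use_mask_token=True)
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 196 patch tokens, plus one [CLS] token in the encoder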
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCamelCase :Any = re.compile(R'''\s+''') def a ( lowerCamelCase__ ): '''simple docstring''' return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def a ( lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""] A_ : Optional[int] = example["""content"""].splitlines() for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ): '''simple docstring''' A_ : Any = ["""unit tests""", """test file""", """configuration file"""] A_ : List[str] = example["""content"""].splitlines() A_ : str = 0 A_ : Union[str, Any] = 0 # first test for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test A_ : List[Any] = example["""content"""].count("""\n""" ) A_ : Any = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = ["""def """, """class """, """for """, """while """] A_ : Optional[int] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def a ( lowerCamelCase__ , lowerCamelCase__=4 ): '''simple docstring''' A_ : Tuple = example["""content"""].splitlines() A_ : int = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""] A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ ) return {"ratio": ratio} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = {} results.update(get_hash(lowerCamelCase__ ) ) results.update(line_stats(lowerCamelCase__ ) ) results.update(alpha_stats(lowerCamelCase__ ) ) results.update(char_token_ratio(lowerCamelCase__ ) ) results.update(is_autogenerated(lowerCamelCase__ ) ) results.update(is_config_or_test(lowerCamelCase__ ) ) 
results.update(has_no_keywords(lowerCamelCase__ ) ) results.update(has_few_assignments(lowerCamelCase__ ) ) return results def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def a ( lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """rb""" ) as f_in: with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ ) os.unlink(lowerCamelCase__ ) # Settings lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments) lowerCamelCase :Tuple = parser.parse_args() if args.num_workers is None: lowerCamelCase :Tuple = multiprocessing.cpu_count() lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCamelCase :List[Any] = time.time() lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCamelCase :int = time.time() lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCamelCase :int = set(ds.unique('''hash''')) lowerCamelCase :List[str] = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCamelCase :Dict = time.time() lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCamelCase :List[str] = time.time() lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCamelCase :int = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCamelCase :Tuple = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCamelCase :Tuple = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json") lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
686
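The exact-deduplication step in the script above boils down to hashing whitespace-stripped content and keeping only the first occurrence of each hash; a self-contained sketch:

# Exact dedup in miniature, mirroring get_hash / check_uniques above.
import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

examples = ["def f():\n    return 1", "def f():  \n\treturn 1", "x = 2"]
uniques = {content_hash(t) for t in examples}
kept = []
for t in examples:
    h = content_hash(t)
    if h in uniques:      # first occurrence of this hash -> keep it
        uniques.remove(h)
        kept.append(t)
print(len(kept))  # 2: the two f() variants collapse once whitespace is gone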
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel lowerCamelCase :Optional[int] = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 1_3_1_0_7_2, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, } def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2 def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2 A_ : List[Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ ) class _lowerCAmelCase ( __UpperCAmelCase ): pass class _lowerCAmelCase ( nn.Module ): def __init__(self , lowercase ): super().__init__() A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 ) A_ : str = deepcopy(self.diffusion ) A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = MODELS_MAP[model_name]["""url"""] os.system(f'wget {url} ./' ) return f'./{model_name}.ckpt' lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } lowerCamelCase :str = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } lowerCamelCase :int = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } lowerCamelCase :List[Any] = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } lowerCamelCase :Optional[Any] = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def a ( lowerCamelCase__ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""" , 
RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'ResConvBlock error with {name}' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def a ( lowerCamelCase__ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ): return name.replace(lowerCamelCase__ , lowerCamelCase__ ) elif name.startswith(lowerCamelCase__ ): return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value] raise ValueError(f'Attn error with {name}' ) def a ( lowerCamelCase__ , lowerCamelCase__=13 ): '''simple docstring''' A_ : Union[str, Any] = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) A_ : Dict = 0 if string.startswith("""net.3.""" ): depth += 1 A_ : int = string[6:] elif string.startswith("""net.""" ): A_ : Tuple = string[4:] while string.startswith("""main.7.""" ): depth += 1 A_ : Dict = string[7:] if string.startswith("""main.""" ): A_ : Union[str, Any] = string[5:] # mid block if string[:2].isdigit(): A_ : Optional[Any] = string[:2] A_ : Optional[Any] = string[2:] else: A_ : List[Any] = string[0] A_ : Dict = string[1:] if depth == max_depth: A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num] A_ : Optional[Any] = """mid_block""" elif depth > 0 and int(lowerCamelCase__ ) < 7: A_ : Any = DOWN_NUM_TO_LAYER[layer_num] A_ : Union[str, Any] = f'down_blocks.{depth}' elif depth > 0 and int(lowerCamelCase__ ) > 7: A_ : List[str] = UP_NUM_TO_LAYER[layer_num] A_ : List[str] = f'up_blocks.{max_depth - depth - 1}' elif depth == 0: A_ : str = DEPTH_0_TO_LAYER[layer_num] A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' 
) A_ : Optional[int] = string_left[1:] if "resnets" in new_layer: A_ : Tuple = convert_resconv_naming(lowerCamelCase__ ) elif "attentions" in new_layer: A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ ) A_ : Dict = new_string_left if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left else: A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue A_ : List[Any] = rename(lowerCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: A_ : int = v return new_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if len(lowerCamelCase__ ) == 1: if len(v.shape ) == 3: # weight A_ : Optional[Any] = v[:, :, 0] else: # bias A_ : Union[str, Any] = v else: # qkv matrices A_ : Optional[int] = v.shape[0] A_ : str = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0] else: A_ : str = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}' A_ : int = download(lowerCamelCase__ ) A_ : Any = MODELS_MAP[model_name]["""sample_rate"""] A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""] A_ : Tuple = Object() A_ : Union[str, Any] = sample_size A_ : Tuple = sample_rate A_ : int = 0 A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ ) A_ : Optional[Any] = diffusers_model.state_dict() A_ : Dict = DiffusionUncond(lowerCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] ) A_ : Any = orig_model.diffusion_ema.eval() A_ : Any = orig_model.state_dict() A_ : List[str] = rename_orig_weights(lowerCamelCase__ ) A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}' assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}' if key == "time_proj.weight": A_ : str = value.squeeze() A_ : Union[str, Any] = value diffusers_model.load_state_dict(lowerCamelCase__ ) A_ : Optional[Any] = 1_00 A_ : Union[str, Any] = 33 A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ ) A_ : List[str] = torch.manual_seed(lowerCamelCase__ ) A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ ) A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1] A_ : List[Any] = get_crash_schedule(lowerCamelCase__ ) A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) A_ : str = torch.manual_seed(33 ) A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} ) A_ : str = generated.clamp(-1 , 1 ) A_ : List[Any] = (generated - audio).abs().sum() A_ : int = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , lowerCamelCase__ ) print("""Diff max""" , lowerCamelCase__ ) assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/' print(f'Conversion for {model_name} successful!' ) if __name__ == "__main__": lowerCamelCase :int = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCamelCase :List[str] = parser.parse_args() main(args)
686
1
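The crash-schedule math used in the conversion above, written out with plain floats. The atan2(sigma, alpha) argument order is an assumption based on the usual convention in this sampler family, since the argument names are masked in the listing.

import math

def alpha_sigma_to_t(alpha: float, sigma: float) -> float:
    return math.atan2(sigma, alpha) / math.pi * 2

def get_crash_t(t: float) -> float:
    sigma = math.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)

print(round(get_crash_t(0.5), 4))  # 0.3333: the schedule midpoint moves earlier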
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) lowerCamelCase :int = logging.getLogger(__name__) lowerCamelCase :Any = tf.data.AUTOTUNE def a ( ): '''simple docstring''' A_ : str = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=lowerCamelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=lowerCamelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=lowerCamelCase__ , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=lowerCamelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=lowerCamelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=lowerCamelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=lowerCamelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=lowerCamelCase__ , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=lowerCamelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase__ , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=lowerCamelCase__ , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=lowerCamelCase__ , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=lowerCamelCase__ , default=5_12 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=lowerCamelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=lowerCamelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" ) A_ : Tuple = parser.parse_args() return args def a ( lowerCamelCase__ ): '''simple docstring''' try: if args.tpu_name: A_ : Dict = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: A_ : Dict = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(lowerCamelCase__ ) tf.tpu.experimental.initialize_tpu_system(lowerCamelCase__ ) return tpu def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Tuple = 0 for file in file_list: A_ : List[Any] = file.split("""/""" )[-1] A_ : Any = re.search(r"""-\d+-(\d+)\.tfrecord""" , lowerCamelCase__ ).group(1 ) A_ : Optional[Any] = int(lowerCamelCase__ ) num_samples += sample_count return num_samples def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): '''simple docstring''' A_ : Optional[int] = count_samples(lowerCamelCase__ ) A_ : Union[str, Any] = tf.data.Dataset.from_tensor_slices(lowerCamelCase__ ) if shuffle: A_ : Dict = dataset.shuffle(len(lowerCamelCase__ ) ) A_ : Dict = tf.data.TFRecordDataset(lowerCamelCase__ , num_parallel_reads=lowerCamelCase__ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A_ : Dict = dataset.apply(tf.data.experimental.assert_cardinality(lowerCamelCase__ ) ) A_ : Any = dataset.map(lowerCamelCase__ , num_parallel_calls=lowerCamelCase__ ) if shuffle: assert shuffle_buffer_size is not None A_ : Optional[Any] = dataset.shuffle(args.shuffle_buffer_size ) A_ : List[str] = dataset.batch(lowerCamelCase__ , drop_remainder=lowerCamelCase__ ) A_ : str = dataset.map(lowerCamelCase__ , num_parallel_calls=lowerCamelCase__ ) A_ : Any = dataset.prefetch(lowerCamelCase__ ) return dataset def a ( lowerCamelCase__ ): '''simple docstring''' if not args.no_tpu: A_ : List[str] = initialize_tpu(lowerCamelCase__ ) A_ : Optional[int] = tf.distribute.TPUStrategy(lowerCamelCase__ ) else: A_ : Dict = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer ) A_ : Optional[Any] = AutoConfig.from_pretrained(args.pretrained_model_config ) A_ : Dict = tokenizer.vocab_size A_ : str = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' ) A_ : Union[str, Any] = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' 
) A_ : Tuple = count_samples(lowerCamelCase__ ) A_ : Optional[int] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A_ : Any = steps_per_epoch * args.num_epochs with strategy.scope(): A_ : Union[str, Any] = TFAutoModelForMaskedLM.from_config(lowerCamelCase__ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A_, A_ : Tuple = create_optimizer( num_train_steps=lowerCamelCase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=lowerCamelCase__ , metrics=["""accuracy"""] ) def decode_fn(lowerCamelCase__ ): A_ : Any = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(lowerCamelCase__ , lowerCamelCase__ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. A_ : Tuple = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCamelCase__ , return_tensors="""tf""" ) def mask_with_collator(lowerCamelCase__ ): # TF really needs an isin() function A_ : int = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) A_, A_ : Optional[int] = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(lowerCamelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCamelCase__ , ) return batch A_ : Optional[int] = args.per_replica_batch_size * strategy.num_replicas_in_sync A_ : Union[str, Any] = prepare_dataset( lowerCamelCase__ , decode_fn=lowerCamelCase__ , mask_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , ) A_ : Tuple = prepare_dataset( lowerCamelCase__ , decode_fn=lowerCamelCase__ , mask_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , ) A_ : int = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCamelCase__ ) ) model.fit( lowerCamelCase__ , validation_data=lowerCamelCase__ , epochs=args.num_epochs , callbacks=lowerCamelCase__ , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": lowerCamelCase :List[str] = parse_args() main(args)
686
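The shard-counting helper in the training script above infers sample counts from filenames. Isolated below, assuming shards named like train-00000-2048.tfrecord, where the last number is the record count:

import re

def count_samples(file_list):
    total = 0
    for file in file_list:
        filename = file.split("/")[-1]
        total += int(re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1))
    return total

print(count_samples([
    "gs://bucket/train-00000-2048.tfrecord",
    "gs://bucket/train-00001-512.tfrecord",
]))  # 2560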
'''simple docstring''' from math import factorial def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if successes > trials: raise ValueError("""successes must be lower or equal to trials""" ) if trials < 0 or successes < 0: raise ValueError("""the function is defined for non-negative integers""" ) if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise ValueError("""the function is defined for non-negative integers""" ) if not 0 < prob < 1: raise ValueError("""prob has to be in range of 1 - 0""" ) A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) ) coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
686
1
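A quick cross-check of the binomial PMF above against math.comb (Python 3.8+):

from math import comb

def binomial_pmf(successes: int, trials: int, prob: float) -> float:
    return comb(trials, successes) * prob**successes * (1 - prob) ** (trials - successes)

# P(X = 2) for 4 trials at p = 0.75, and the PMF summing to 1 over all outcomes
assert abs(binomial_pmf(2, 4, 0.75) - 0.2109375) < 1e-12
assert abs(sum(binomial_pmf(k, 4, 0.75) for k in range(5)) - 1.0) < 1e-12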
'''simple docstring''' from __future__ import annotations class _lowerCAmelCase : def __init__(self , lowercase ): A_ : Optional[Any] = TypeError( """Matrices must be formed from a list of zero or more lists containing at """ """least one and the same number of values, each of which must be of type """ """int or float.""" ) if len(lowercase ) != 0: A_ : List[Any] = len(rows[0] ) if cols == 0: raise error for row in rows: if len(lowercase ) != cols: raise error for value in row: if not isinstance(lowercase , (int, float) ): raise error A_ : str = rows else: A_ : Optional[int] = [] def _a (self ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _a (self ): return len(self.rows ) @property def _a (self ): return len(self.rows[0] ) @property def _a (self ): return (self.num_rows, self.num_columns) @property def _a (self ): return self.order[0] == self.order[1] def _a (self ): A_ : str = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(lowercase ) def _a (self ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _a (self ): return bool(self.determinant() ) def _a (self , lowercase , lowercase ): A_ : Optional[int] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(lowercase ).determinant() def _a (self , lowercase , lowercase ): if (row + column) % 2 == 0: return self.get_minor(lowercase , lowercase ) return -1 * self.get_minor(lowercase , lowercase ) def _a (self ): return Matrix( [ [self.get_minor(lowercase , lowercase ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _a (self ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _a (self ): A_ : Optional[int] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(lowercase ) def _a (self ): A_ : str = self.determinant() if not determinant: raise TypeError("""Only matrices with a non-zero determinant have an inverse""" ) return self.adjugate() * (1 / determinant) def __repr__(self ): return str(self.rows ) def __str__(self ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ """[""" + """. 
""".join([str(lowercase ) for value in row] ) + """.]""" for row in self.rows ] ) + "]" ) def _a (self , lowercase , lowercase = None ): A_ : Optional[Any] = TypeError("""Row must be a list containing all ints and/or floats""" ) if not isinstance(lowercase , lowercase ): raise type_error for value in row: if not isinstance(lowercase , (int, float) ): raise type_error if len(lowercase ) != self.num_columns: raise ValueError( """Row must be equal in length to the other rows in the matrix""" ) if position is None: self.rows.append(lowercase ) else: A_ : Any = self.rows[0:position] + [row] + self.rows[position:] def _a (self , lowercase , lowercase = None ): A_ : List[str] = TypeError( """Column must be a list containing all ints and/or floats""" ) if not isinstance(lowercase , lowercase ): raise type_error for value in column: if not isinstance(lowercase , (int, float) ): raise type_error if len(lowercase ) != self.num_rows: raise ValueError( """Column must be equal in length to the other columns in the matrix""" ) if position is None: A_ : Any = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: A_ : Optional[int] = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__(self , lowercase ): if not isinstance(lowercase , lowercase ): return NotImplemented return self.rows == other.rows def __ne__(self , lowercase ): return not self == other def __neg__(self ): return self * -1 def __add__(self , lowercase ): if self.order != other.order: raise ValueError("""Addition requires matrices of the same order""" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__(self , lowercase ): if self.order != other.order: raise ValueError("""Subtraction requires matrices of the same order""" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__(self , lowercase ): if isinstance(lowercase , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(lowercase , lowercase ): if self.num_columns != other.num_rows: raise ValueError( """The number of columns in the first matrix must """ """be equal to the number of rows in the second""" ) return Matrix( [ [Matrix.dot_product(lowercase , lowercase ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( """A Matrix can only be multiplied by an int, float, or another matrix""" ) def __pow__(self , lowercase ): if not isinstance(lowercase , lowercase ): raise TypeError("""A Matrix can only be raised to the power of an int""" ) if not self.is_square: raise ValueError("""Only square matrices can be raised to a power""" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( """Only invertable matrices can be raised to a negative power""" ) A_ : List[str] = self for _ in range(other - 1 ): result *= self return result @classmethod def _a (cls , lowercase , lowercase ): return sum(row[i] * column[i] for i in range(len(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
686
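What the Matrix class's adjugate-over-determinant inverse computes, done by hand for the 2x2 case as a sanity check:

def inverse_2x2(m):
    (a, b), (c, d) = m
    det = a * d - b * c
    if det == 0:
        raise TypeError("Only matrices with a non-zero determinant have an inverse")
    # adjugate of [[a, b], [c, d]] is [[d, -b], [-c, a]]
    return [[d / det, -b / det], [-c / det, a / det]]

print(inverse_2x2([[4, 7], [2, 6]]))  # [[0.6, -0.7], [-0.2, 0.4]]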
'''simple docstring''' import re def a ( lowerCamelCase__ ): '''simple docstring''' if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
686
1
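Usage of the complement function above, plus the common reverse-complement variant (Watson-Crick pairing A<->T, C<->G):

def complement(dna: str) -> str:
    return dna.translate(str.maketrans("ATCG", "TAGC"))

print(complement("GCTA"))        # CGAT
print(complement("GCTA")[::-1])  # TAGC, the reverse complement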
'''simple docstring''' import torch from diffusers import StableDiffusionPipeline lowerCamelCase :List[str] = '''path-to-your-trained-model''' lowerCamelCase :List[str] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''') lowerCamelCase :List[str] = '''A photo of sks dog in a bucket''' lowerCamelCase :Union[str, Any] = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0] image.save('''dog-bucket.png''')
686
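A reproducible variant of the DreamBooth inference snippet above; the model path stays a placeholder, and a CUDA GPU plus the diffusers package are assumed.

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "path-to-your-trained-model", torch_dtype=torch.float16
).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(42)  # fixed seed
image = pipe(
    "A photo of sks dog in a bucket",
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("dog-bucket.png")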
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowerCamelCase__ ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowerCamelCase__ ): http_head("""https://huggingface.co""" )
686
1
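The pytest.raises pattern those offline-simulation tests rely on, shown in isolation; fetch() here is a stand-in for illustration, not part of the datasets API.

import pytest

def fetch(url: str, offline: bool = False) -> str:
    if offline:
        raise ConnectionError(f"offline mode: cannot reach {url}")
    return "ok"

def test_fetch_offline():
    with pytest.raises(ConnectionError):
        fetch("https://huggingface.co", offline=True)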
'''simple docstring''' from __future__ import annotations from math import pi, sqrt def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
686
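A numeric check of the resonance formula above, f = 1 / (2 * pi * sqrt(L * C)):

from math import pi, sqrt

inductance, capacitance = 10e-3, 1e-6  # 10 mH, 1 uF
frequency = 1 / (2 * pi * sqrt(inductance * capacitance))
print(round(frequency, 2))  # 1591.55 Hz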
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCamelCase :Any = re.compile(R'''\s+''') def a ( lowerCamelCase__ ): '''simple docstring''' return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def a ( lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""] A_ : Optional[int] = example["""content"""].splitlines() for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ): '''simple docstring''' A_ : Any = ["""unit tests""", """test file""", """configuration file"""] A_ : List[str] = example["""content"""].splitlines() A_ : str = 0 A_ : Union[str, Any] = 0 # first test for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test A_ : List[Any] = example["""content"""].count("""\n""" ) A_ : Any = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = ["""def """, """class """, """for """, """while """] A_ : Optional[int] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def a ( lowerCamelCase__ , lowerCamelCase__=4 ): '''simple docstring''' A_ : Tuple = example["""content"""].splitlines() A_ : int = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""] A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ ) return {"ratio": ratio} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = {} results.update(get_hash(lowerCamelCase__ ) ) results.update(line_stats(lowerCamelCase__ ) ) results.update(alpha_stats(lowerCamelCase__ ) ) results.update(char_token_ratio(lowerCamelCase__ ) ) results.update(is_autogenerated(lowerCamelCase__ ) ) results.update(is_config_or_test(lowerCamelCase__ ) ) 
results.update(has_no_keywords(lowerCamelCase__ ) ) results.update(has_few_assignments(lowerCamelCase__ ) ) return results def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def a ( lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """rb""" ) as f_in: with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ ) os.unlink(lowerCamelCase__ ) # Settings lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments) lowerCamelCase :Tuple = parser.parse_args() if args.num_workers is None: lowerCamelCase :Tuple = multiprocessing.cpu_count() lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCamelCase :List[Any] = time.time() lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCamelCase :int = time.time() lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCamelCase :int = set(ds.unique('''hash''')) lowerCamelCase :List[str] = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCamelCase :Dict = time.time() lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCamelCase :List[str] = time.time() lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCamelCase :int = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCamelCase :Tuple = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCamelCase :Tuple = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json") lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
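# NOTE (hedged sketch): `hashlib.mda` in get_hash above is not a standard-library
# attribute; the intended call is presumably `hashlib.md5`. A minimal,
# self-contained version of the same whitespace-insensitive content hash used
# for exact deduplication:
import hashlib
import re

PATTERN = re.compile(r"\s+")


def content_hash(example: dict) -> dict:
    # Collapse whitespace before hashing so trivially reformatted duplicates
    # map to the same digest.
    normalized = re.sub(PATTERN, "", example["content"])
    return {"hash": hashlib.md5(normalized.encode("utf-8")).hexdigest()}


print(content_hash({"content": "def f(x):\n    return x"}))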
686
1
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _lowerCAmelCase : def __init__(self , lowercase , lowercase=2 , lowercase=True , lowercase=False , lowercase=10 , lowercase=3 , lowercase=32 * 8 , lowercase=32 * 8 , lowercase=4 , lowercase=64 , ): A_ : Tuple = parent A_ : str = batch_size A_ : List[str] = is_training A_ : Any = use_auxiliary_loss A_ : List[Any] = num_queries A_ : Union[str, Any] = num_channels A_ : Optional[Any] = min_size A_ : str = max_size A_ : Union[str, Any] = num_labels A_ : Tuple = hidden_dim A_ : int = hidden_dim def _a (self ): A_ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowercase ) A_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase ) A_ : List[str] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase ) > 0.5 ).float() A_ : str = (torch.rand((self.batch_size, self.num_labels) , device=lowercase ) > 0.5).long() A_ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _a (self ): A_ : Dict = MaskaFormerConfig( hidden_size=self.hidden_dim , ) A_ : Union[str, Any] = self.num_queries A_ : int = self.num_labels A_ : List[str] = [1, 1, 1, 1] A_ : Any = self.num_channels A_ : Union[str, Any] = 64 A_ : Any = 128 A_ : List[str] = self.hidden_dim A_ : Any = self.hidden_dim A_ : int = self.hidden_dim return config def _a (self ): A_, A_, A_, A_, A_ : Optional[Any] = self.prepare_config_and_inputs() A_ : Optional[int] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def _a (self , lowercase , lowercase ): A_ : str = output.encoder_hidden_states A_ : str = output.pixel_decoder_hidden_states A_ : Union[str, Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowercase ) , config.decoder_layers ) def _a (self , lowercase , lowercase , lowercase , lowercase=False ): with torch.no_grad(): A_ : List[Any] = MaskaFormerModel(config=lowercase ) model.to(lowercase ) model.eval() A_ : Dict = model(pixel_values=lowercase , pixel_mask=lowercase ) A_ : int = model(lowercase , output_hidden_states=lowercase ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowercase , lowercase ) def _a (self , lowercase , lowercase , lowercase , 
lowercase , lowercase ): A_ : Tuple = MaskaFormerForUniversalSegmentation(config=lowercase ) model.to(lowercase ) model.eval() def comm_check_on_output(lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A_ : int = model(pixel_values=lowercase , pixel_mask=lowercase ) A_ : List[Any] = model(lowercase ) comm_check_on_output(lowercase ) A_ : Any = model( pixel_values=lowercase , pixel_mask=lowercase , mask_labels=lowercase , class_labels=lowercase ) comm_check_on_output(lowercase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : List[str] = False def _a (self ): A_ : List[Any] = MaskaFormerModelTester(self ) A_ : List[str] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_, A_ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowercase , **lowercase , output_hidden_states=lowercase ) def _a (self ): A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def _a (self ): pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def _a (self ): pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def _a (self ): pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def _a (self ): pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _a (self ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a (self ): pass def _a (self ): A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(lowercase ) A_ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Union[str, Any] = [*signature.parameters.keys()] A_ : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase ) @slow def _a (self ): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A_ 
: str = MaskaFormerModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def _a (self ): A_ : Dict = (self.model_tester.min_size,) * 2 A_ : Dict = { """pixel_values""": torch.randn((2, 3, *size) , device=lowercase ), """mask_labels""": torch.randn((2, 10, *size) , device=lowercase ), """class_labels""": torch.zeros(2 , 10 , device=lowercase ).long(), } A_ : List[Any] = self.model_tester.get_config() A_ : Optional[Any] = MaskaFormerForUniversalSegmentation(lowercase ).to(lowercase ) A_ : List[str] = model(**lowercase ) self.assertTrue(outputs.loss is not None ) def _a (self ): A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowercase , **lowercase , output_hidden_states=lowercase ) def _a (self ): A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[str] = model_class(lowercase ).to(lowercase ) A_ : Tuple = model(**lowercase , output_attentions=lowercase ) self.assertTrue(outputs.attentions is not None ) def _a (self ): if not self.model_tester.is_training: return A_ : List[Any] = self.all_model_classes[1] A_, A_, A_, A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs() A_ : str = model_class(lowercase ) model.to(lowercase ) model.train() A_ : int = model(lowercase , mask_labels=lowercase , class_labels=lowercase ).loss loss.backward() def _a (self ): A_ : int = self.all_model_classes[1] A_, A_, A_, A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs() A_ : List[Any] = True A_ : Any = True A_ : Union[str, Any] = model_class(lowercase ).to(lowercase ) model.train() A_ : Dict = model(lowercase , mask_labels=lowercase , class_labels=lowercase ) A_ : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A_ : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A_ : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowercase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCamelCase :Optional[Any] = 1E-4 def a ( ): '''simple docstring''' A_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class _lowerCAmelCase ( unittest.TestCase ): @cached_property def _a (self ): return "facebook/mask2former-swin-small-coco-instance" @cached_property def _a (self ): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _a (self ): A_ : Optional[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowercase ) A_ : str = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : Optional[Any] = image_processor(lowercase , return_tensors="""pt""" ).to(lowercase ) A_ : int = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowercase , (1, 3, 384, 384) ) with torch.no_grad(): A_ : Tuple = model(**lowercase ) A_ : List[str] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowercase ) self.assertTrue( 
torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase , atol=lowercase ) ) A_ : List[Any] = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowercase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase , atol=lowercase ) ) A_ : Optional[Any] = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowercase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase , atol=lowercase ) ) def _a (self ): A_ : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowercase ).eval() A_ : Union[str, Any] = self.default_image_processor A_ : Tuple = prepare_img() A_ : Dict = image_processor(lowercase , return_tensors="""pt""" ).to(lowercase ) A_ : Any = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowercase , (1, 3, 384, 384) ) with torch.no_grad(): A_ : Optional[Any] = model(**lowercase ) # masks_queries_logits A_ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A_ : int = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] A_ : int = torch.tensor(lowercase ).to(lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase , atol=lowercase ) ) # class_queries_logits A_ : Optional[int] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) A_ : List[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase , atol=lowercase ) ) def _a (self ): A_ : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowercase ).eval() A_ : List[Any] = self.default_image_processor A_ : Any = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) A_ : str = inputs["""pixel_values"""].to(lowercase ) A_ : List[Any] = [el.to(lowercase ) for el in inputs["""mask_labels"""]] A_ : Tuple = [el.to(lowercase ) for el in inputs["""class_labels"""]] with torch.no_grad(): A_ : str = model(**lowercase ) self.assertTrue(outputs.loss is not None )
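# A hedged sketch of loading the checkpoint the integration tests above
# exercise; the upstream class names (with the digit "2") are assumed from
# the model id used in the tests.
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

checkpoint = "facebook/mask2former-swin-small-coco-instance"
processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint).eval()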
686
'''simple docstring'''

import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets

REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
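# A hedged sketch (not part of the fixture module): a test consuming the
# dataset_loading_script_dir fixture by pointing load_dataset at the
# generated script directory.
from datasets import load_dataset


def test_dummy_dataset_loads(dataset_loading_script_dir):
    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert ds.num_rows > 0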
686
1
'''simple docstring'''

import torch

from diffusers import DiffusionPipeline


class _lowerCAmelCase(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Draw a random latent of the shape the UNet expects.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # One denoising step: predict noise, then step the scheduler.
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample

        # Deterministic dummy output used by tests: a tensor of ones.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
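# A hedged usage sketch for the one-step pipeline above; the tiny randomly
# initialised UNet and the scheduler choice are illustrative assumptions.
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler()

pipeline = _lowerCAmelCase(unet=unet, scheduler=scheduler)
result = pipeline()  # a tensor of ones with the latent's shape
print(result.shape)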
686
'''simple docstring'''

from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    '''simple docstring'''
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    '''simple docstring'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
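# A hedged cross-check (not in the original script): the same query point
# classified with scikit-learn's built-in estimator, which should normally
# agree with the hand-rolled majority vote above.
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print(classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])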
686
1
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase :List[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1_60_00 ): '''simple docstring''' A_ : List[str] = int(round(sample_rate * max_length ) ) if len(lowerCamelCase__ ) <= sample_length: return wav A_ : Optional[int] = randint(0 , len(lowerCamelCase__ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Optional[str] = field(default=__UpperCAmelCase , metadata={'help': 'Name of a dataset from the datasets package'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'A file containing the training audio paths and labels.'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'A file containing the validation audio paths and labels.'} ) __SCREAMING_SNAKE_CASE : str = field( default='train' , metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } , ) __SCREAMING_SNAKE_CASE : str = field( default='validation' , metadata={ 'help': ( 'The name of the training data set split to use (via the datasets library). Defaults to \'validation\'' ) } , ) __SCREAMING_SNAKE_CASE : str = field( default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , ) __SCREAMING_SNAKE_CASE : str = field( default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=__UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) } , ) __SCREAMING_SNAKE_CASE : float = field( default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , ) @dataclass class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : str = field( default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} ) __SCREAMING_SNAKE_CASE : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=__UpperCAmelCase , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=__UpperCAmelCase , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=__UpperCAmelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __SCREAMING_SNAKE_CASE : Optional[bool] = field( default=__UpperCAmelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=__UpperCAmelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def _a (self ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , lowercase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def a ( ): '''simple docstring''' A_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A_, A_, A_ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A_, A_, A_ : Any = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_audio_classification""" , lowerCamelCase__ , lowerCamelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() A_ : Any = training_args.get_process_log_level() logger.setLevel(lowerCamelCase__ ) transformers.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. A_ : int = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. A_ : Union[str, Any] = DatasetDict() A_ : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) A_ : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' """Make sure to set `--audio_column_name` to the correct audio column - one of """ f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' """Make sure to set `--label_column_name` to the correct text column - one of """ f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy A_ : List[str] = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
A_ : List[Any] = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) A_ : int = feature_extractor.model_input_names[0] def train_transforms(lowerCamelCase__ ): A_ : Optional[int] = [] for audio in batch[data_args.audio_column_name]: A_ : Union[str, Any] = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(lowerCamelCase__ ) A_ : Optional[int] = feature_extractor(lowerCamelCase__ , sampling_rate=feature_extractor.sampling_rate ) A_ : str = {model_input_name: inputs.get(lowerCamelCase__ )} A_ : int = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(lowerCamelCase__ ): A_ : List[Any] = [audio["""array"""] for audio in batch[data_args.audio_column_name]] A_ : Optional[int] = feature_extractor(lowerCamelCase__ , sampling_rate=feature_extractor.sampling_rate ) A_ : Union[str, Any] = {model_input_name: inputs.get(lowerCamelCase__ )} A_ : Tuple = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. A_ : List[str] = raw_datasets["""train"""].features[data_args.label_column_name].names A_, A_ : List[str] = {}, {} for i, label in enumerate(lowerCamelCase__ ): A_ : Optional[Any] = str(lowerCamelCase__ ) A_ : List[str] = label # Load the accuracy metric from the datasets package A_ : Any = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(lowerCamelCase__ ): A_ : List[Any] = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=lowerCamelCase__ , references=eval_pred.label_ids ) A_ : Any = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCamelCase__ ) , labelaid=lowerCamelCase__ , idalabel=lowerCamelCase__ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A_ : Tuple = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: A_ : Tuple = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(lowerCamelCase__ , output_all_columns=lowerCamelCase__ ) if training_args.do_eval: if data_args.max_eval_samples is not None: A_ : Dict = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(lowerCamelCase__ , output_all_columns=lowerCamelCase__ ) # Initialize our trainer A_ : Tuple = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=raw_datasets["""train"""] 
if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , ) # Training if training_args.do_train: A_ : List[Any] = None if training_args.resume_from_checkpoint is not None: A_ : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: A_ : Dict = last_checkpoint A_ : List[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A_ : str = trainer.evaluate() trainer.log_metrics("""eval""" , lowerCamelCase__ ) trainer.save_metrics("""eval""" , lowerCamelCase__ ) # Write model card and (optionally) push to hub A_ : Optional[Any] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase__ ) else: trainer.create_model_card(**lowerCamelCase__ ) if __name__ == "__main__": main()
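# A hedged illustration of the cropping helper defined at the top of the
# script (called `random_subsample` at its use site in train_transforms):
# clips longer than max_length seconds are cut at a random offset.
import numpy as np

sample_rate = 16_000
wav = np.random.randn(sample_rate * 5)  # a synthetic 5-second clip

clip = random_subsample(wav, max_length=2.0, sample_rate=sample_rate)
assert len(clip) == int(round(sample_rate * 2.0))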
686
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): def _a (self , lowercase ): if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()] return labels def __call__(self , lowercase , lowercase , lowercase ): if len(lowercase ) == 0 or len(lowercase ) == 0: raise ValueError("""You must include at least one label and at least one sequence.""" ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """ """Make sure the passed template includes formatting syntax such as {{}} where the label should go.""" ).format(lowercase ) ) if isinstance(lowercase , lowercase ): A_ : Tuple = [sequences] A_ : int = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ): A_ : int = args_parser super().__init__(*lowercase , **lowercase ) if self.entailment_id == -1: logger.warning( """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """ """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" ) @property def _a (self ): for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("""entail""" ): return ind return -1 def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ): A_ : Any = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """ """ `pad_token=eos_token`""" ) A_ : str = self.tokenizer.eos_token try: A_ : str = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , ) except Exception as e: if "too short" in str(lowercase ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. A_ : Any = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def _a (self , **lowercase ): if kwargs.get("""multi_class""" , lowercase ) is not None: A_ : Tuple = kwargs["""multi_class"""] logger.warning( """The `multi_class` argument has been deprecated and renamed to `multi_label`. 
""" """`multi_class` will be removed in a future version of Transformers.""" ) A_ : Optional[Any] = {} if "candidate_labels" in kwargs: A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] ) if "hypothesis_template" in kwargs: A_ : List[str] = kwargs["""hypothesis_template"""] A_ : List[Any] = {} if "multi_label" in kwargs: A_ : Optional[Any] = kwargs["""multi_label"""] return preprocess_params, {}, postprocess_params def __call__(self , lowercase , *lowercase , **lowercase , ): if len(lowercase ) == 0: pass elif len(lowercase ) == 1 and "candidate_labels" not in kwargs: A_ : Union[str, Any] = args[0] else: raise ValueError(F'Unable to understand extra arguments {args}' ) return super().__call__(lowercase , **lowercase ) def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ): A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ): A_ : List[Any] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowercase ) - 1, **model_input, } def _a (self , lowercase ): A_ : Optional[Any] = inputs["""candidate_label"""] A_ : List[Any] = inputs["""sequence"""] A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names} A_ : List[str] = self.model(**lowercase ) A_ : str = { """candidate_label""": candidate_label, """sequence""": sequence, """is_last""": inputs["""is_last"""], **outputs, } return model_outputs def _a (self , lowercase , lowercase=False ): A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs] A_ : str = [outputs["""sequence"""] for outputs in model_outputs] A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] ) A_ : Dict = logits.shape[0] A_ : Any = len(lowercase ) A_ : List[str] = N // n A_ : Tuple = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowercase ) == 1: # softmax over the entailment vs. contradiction dim for each label independently A_ : Union[str, Any] = self.entailment_id A_ : Any = -1 if entailment_id == 0 else 0 A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]] A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Optional[Any] = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels A_ : Optional[int] = reshaped_outputs[..., self.entailment_id] A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Any = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
686
1
'''simple docstring''' import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Optional[int] = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = os.path.abspath(lowerCamelCase__ ) logger.info(f'Converting TensorFlow checkpoint from {tf_path}' ) # Load weights from TF model A_ : int = tf.train.list_variables(lowerCamelCase__ ) A_ : Dict = [] A_ : Tuple = [] A_ : Union[str, Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A_ : Tuple = full_name.split("""/""" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f'Skipping non-model layer {full_name}' ) continue if "optimizer" in full_name: logger.info(f'Skipping optimization layer {full_name}' ) continue if name[0] == "model": # ignore initial 'model' A_ : List[str] = name[1:] # figure out how many levels deep the name is A_ : int = 0 for _name in name: if _name.startswith("""layer_with_weights""" ): depth += 1 else: break layer_depth.append(lowerCamelCase__ ) # read data A_ : List[str] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) names.append("""/""".join(lowerCamelCase__ ) ) arrays.append(lowerCamelCase__ ) logger.info(f'Read a total of {len(lowerCamelCase__ ):,} layers' ) # Sanity check if len(set(lowerCamelCase__ ) ) != 1: raise ValueError(f'Found layer names with different depths (layer depth {list(set(lowerCamelCase__ ) )})' ) A_ : Optional[int] = list(set(lowerCamelCase__ ) )[0] if layer_depth != 1: raise ValueError( """The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP""" """ heads.""" ) # convert layers logger.info("""Converting weights...""" ) for full_name, array in zip(lowerCamelCase__ , lowerCamelCase__ ): A_ : List[Any] = full_name.split("""/""" ) A_ : Tuple = model A_ : str = [] for i, m_name in enumerate(lowerCamelCase__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("""layer_with_weights""" ): A_ : Optional[Any] = int(m_name.split("""-""" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["""embeddings""", """LayerNorm"""] ) A_ : Tuple = getattr(lowerCamelCase__ , """embeddings""" ) A_ : Tuple = getattr(lowerCamelCase__ , """LayerNorm""" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] ) A_ : Optional[int] = getattr(lowerCamelCase__ , """encoder""" ) A_ : str = getattr(lowerCamelCase__ , """layer""" ) A_ : Optional[Any] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["""pooler""", """dense"""] ) A_ : Any = getattr(lowerCamelCase__ , """pooler""" ) A_ : str = getattr(lowerCamelCase__ , """dense""" ) elif m_name == "embeddings": trace.append("""embeddings""" ) A_ : Dict = getattr(lowerCamelCase__ , """embeddings""" ) if layer_num == 0: trace.append("""word_embeddings""" ) A_ : Optional[int] = getattr(lowerCamelCase__ , """word_embeddings""" ) elif layer_num == 1: trace.append("""position_embeddings""" ) A_ : Union[str, Any] = getattr(lowerCamelCase__ , """position_embeddings""" ) elif layer_num == 2: trace.append("""token_type_embeddings""" ) A_ : List[Any] = getattr(lowerCamelCase__ , """token_type_embeddings""" ) else: raise ValueError(f'Unknown embedding layer with name {full_name}' ) trace.append("""weight""" ) A_ : str = getattr(lowerCamelCase__ , """weight""" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["""attention""", """self"""] ) A_ : List[Any] = getattr(lowerCamelCase__ , """attention""" ) A_ : List[Any] = getattr(lowerCamelCase__ , """self""" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["""attention""", """output""", """LayerNorm"""] ) A_ : List[str] = getattr(lowerCamelCase__ , """attention""" ) A_ : List[Any] = getattr(lowerCamelCase__ , """output""" ) A_ : Dict = getattr(lowerCamelCase__ , """LayerNorm""" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["""attention""", """output""", """dense"""] ) A_ : Dict = getattr(lowerCamelCase__ , """attention""" ) A_ : str = getattr(lowerCamelCase__ , """output""" ) A_ : Tuple = getattr(lowerCamelCase__ , """dense""" ) elif m_name == "_output_dense": # output dense trace.extend(["""output""", """dense"""] ) A_ : Any = getattr(lowerCamelCase__ , """output""" ) A_ : Optional[Any] = getattr(lowerCamelCase__ , """dense""" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["""output""", """LayerNorm"""] ) A_ : Dict = getattr(lowerCamelCase__ , """output""" ) A_ : Optional[Any] = getattr(lowerCamelCase__ , """LayerNorm""" ) elif m_name == "_key_dense": # attention key trace.append("""key""" ) A_ : Optional[int] = getattr(lowerCamelCase__ , """key""" ) elif m_name == "_query_dense": # attention query trace.append("""query""" ) A_ : Optional[int] = getattr(lowerCamelCase__ , 
"""query""" ) elif m_name == "_value_dense": # attention value trace.append("""value""" ) A_ : int = getattr(lowerCamelCase__ , """value""" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["""intermediate""", """dense"""] ) A_ : Any = getattr(lowerCamelCase__ , """intermediate""" ) A_ : Union[str, Any] = getattr(lowerCamelCase__ , """dense""" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("""output""" ) A_ : List[Any] = getattr(lowerCamelCase__ , """output""" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("""bias""" ) A_ : Dict = getattr(lowerCamelCase__ , """bias""" ) elif m_name in ["kernel", "gamma"]: trace.append("""weight""" ) A_ : Dict = getattr(lowerCamelCase__ , """weight""" ) else: logger.warning(f'Ignored {m_name}' ) # for certain layers reshape is necessary A_ : List[Any] = """.""".join(lowerCamelCase__ ) if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowerCamelCase__ ) or re.match( r"""(\S+)\.attention\.output\.dense\.weight""" , lowerCamelCase__ ): A_ : List[str] = array.reshape(pointer.data.shape ) if "kernel" in full_name: A_ : Optional[Any] = array.transpose() if pointer.shape == array.shape: A_ : Any = torch.from_numpy(lowerCamelCase__ ) else: raise ValueError( f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:' f' {array.shape}' ) logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' ) return model def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' logger.info(f'Loading model based on config from {config_path}...' ) A_ : Dict = BertConfig.from_json_file(lowerCamelCase__ ) A_ : Union[str, Any] = BertModel(lowerCamelCase__ ) # Load weights from checkpoint logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' ) load_tfa_weights_in_bert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Optional[int] = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model (must include filename).''', ) lowerCamelCase :Dict = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
686
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
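# A hedged usage sketch: instantiate the configuration with its defaults and
# inspect the detection-specific fields added on top of the ViT backbone.
config = YolosConfig()
print(config.num_detection_tokens)  # 100
print(config.image_size)            # [512, 864]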
686
1
'''simple docstring'''
# flake8: noqa
# Lint as: python3
lowerCamelCase :List[Any] = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
686
'''simple docstring'''

import datasets
from jiwer import compute_measures


_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a
different length from the reference word sequence (supposedly the correct one). The WER is derived from the
Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for
comparing different systems as well as for evaluating improvements within one system. This kind of measurement,
however, provides no details on the nature of translation errors and further work is therefore required to identify
the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence
using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states
the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCAmelCase(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
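# A hedged sketch contrasting the two aggregation modes implemented above:
# per-pair accumulation versus a single concatenated jiwer computation.
import datasets

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

wer = datasets.load_metric("wer")
print(wer.compute(predictions=predictions, references=references))
print(wer.compute(predictions=predictions, references=references, concatenate_texts=True))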
686
1
'''simple docstring'''

from __future__ import annotations

import math


def minimax(depth, node_index, is_max, scores, height):
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
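# A hedged worked example for minimax above: with leaves [3, 5, 2, 9] and
# height 2, the root (a max node) sees min(3, 5) = 3 and min(2, 9) = 2,
# so the optimal value is 3. (The script's own tree evaluates to 65.)
import math

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)
print(minimax(0, 0, True, scores, height))  # 3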
686
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
    __SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    __SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
    __SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def _a (self ):
        torch.manual_seed(0 )
        A_ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            cross_attention_dim=32 ,
        )
        A_ : Union[str, Any] = DDIMScheduler(
            beta_start=0.0_00_85 ,
            beta_end=0.0_12 ,
            beta_schedule="""scaled_linear""" ,
            num_train_timesteps=1000 ,
            clip_sample=lowercase ,
            set_alpha_to_one=lowercase ,
        )
        torch.manual_seed(0 )
        A_ : List[str] = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
        )
        torch.manual_seed(0 )
        A_ : List[Any] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1000 ,
        )
        A_ : int = CLIPTextModel(lowercase )
        A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A_ : Any = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def _a (self , lowercase , lowercase=0 ):
        A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
        A_ : int = image / 2 + 0.5
        if str(lowercase ).startswith("""mps""" ):
            A_ : int = torch.manual_seed(lowercase )
        else:
            A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
        A_ : Union[str, Any] = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def _a (self ):
        A_ : Optional[int] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        A_ : Optional[Any] = self.get_dummy_components()
        A_ : Any = CycleDiffusionPipeline(**lowercase )
        A_ : int = pipe.to(lowercase )
        pipe.set_progress_bar_config(disable=lowercase )
        A_ : int = self.get_dummy_inputs(lowercase )
        A_ : str = pipe(**lowercase )
        A_ : str = output.images
        A_ : Dict = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def _a (self ):
        A_ : Dict = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(lowercase , """half""" ):
                A_ : List[str] = module.half()
        A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
        A_ : Optional[Any] = pipe.to(lowercase )
        pipe.set_progress_bar_config(disable=lowercase )
        A_ : Any = self.get_dummy_inputs(lowercase )
        A_ : Tuple = pipe(**lowercase )
        A_ : List[str] = output.images
        A_ : Union[str, Any] = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def _a (self ):
        return super().test_save_load_local()

    @unittest.skip("""non-deterministic pipeline""" )
    def _a (self ):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def _a (self ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def _a (self ):
        return super().test_save_load_optional_components()

    @skip_mps
    def _a (self ):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    def _a (self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a (self ):
        A_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        A_ : Optional[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        A_ : List[str] = init_image.resize((512, 512) )
        A_ : Dict = """CompVis/stable-diffusion-v1-4"""
        A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
        A_ : Any = CycleDiffusionPipeline.from_pretrained(
            lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
        pipe.to(lowercase )
        pipe.set_progress_bar_config(disable=lowercase )
        pipe.enable_attention_slicing()
        A_ : str = """A black colored car"""
        A_ : Dict = """A blue colored car"""
        A_ : Union[str, Any] = torch.manual_seed(0 )
        A_ : Optional[int] = pipe(
            prompt=lowercase ,
            source_prompt=lowercase ,
            image=lowercase ,
            num_inference_steps=100 ,
            eta=0.1 ,
            strength=0.85 ,
            guidance_scale=3 ,
            source_guidance_scale=1 ,
            generator=lowercase ,
            output_type="""np""" ,
        )
        A_ : str = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    def _a (self ):
        A_ : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        A_ : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        A_ : Optional[int] = init_image.resize((512, 512) )
        A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
        A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
        A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
        pipe.to(lowercase )
        pipe.set_progress_bar_config(disable=lowercase )
        pipe.enable_attention_slicing()
        A_ : Optional[Any] = """A black colored car"""
        A_ : int = """A blue colored car"""
        A_ : str = torch.manual_seed(0 )
        A_ : Any = pipe(
            prompt=lowercase ,
            source_prompt=lowercase ,
            image=lowercase ,
            num_inference_steps=100 ,
            eta=0.1 ,
            strength=0.85 ,
            guidance_scale=3 ,
            source_guidance_scale=1 ,
            generator=lowercase ,
            output_type="""np""" ,
        )
        A_ : int = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''
import argparse

import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging

logging.set_verbosity_info()


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : int = MobileBertConfig.from_json_file(lowerCamelCase__ )
    print(f'Building PyTorch model from configuration: {config}' )
    A_ : Optional[int] = MobileBertForPreTraining(lowerCamelCase__ )
    # Load weights from tf checkpoint
    A_ : Dict = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , lowerCamelCase__ )


if __name__ == "__main__":
    lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--mobilebert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained MobileBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase :Optional[int] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
'''simple docstring'''
import unittest

from diffusers.models.unet_ad_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = DownBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : List[str] = 'down'

    def _a (self ):
        A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Tuple = 'down'

    def _a (self ):
        A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Optional[int] = 'down'

    def _a (self ):
        A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Optional[int] = 'down'

    def _a (self ):
        A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
        A_ : Optional[Any] = 32
        return init_dict, inputs_dict

    def _a (self ):
        A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : List[Any] = 'down'

    @property
    def _a (self ):
        return super().get_dummy_input(include_encoder_hidden_states=lowercase )

    def _a (self ):
        A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
        A_ : Union[str, Any] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
    def _a (self ):
        A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Optional[int] = 'down'

    @property
    def _a (self ):
        return super().get_dummy_input(include_skip_sample=lowercase )

    def _a (self ):
        A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'

    @property
    def _a (self ):
        return super().get_dummy_input(include_skip_sample=lowercase )

    def _a (self ):
        A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Any = 'down'

    @property
    def _a (self ):
        return super().get_dummy_input(include_temb=lowercase )

    def _a (self ):
        A_ : int = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        A_ : Any = self.dummy_input
        return init_dict, inputs_dict

    def _a (self ):
        A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Optional[Any] = 'down'

    @property
    def _a (self ):
        return super().get_dummy_input(include_temb=lowercase )

    def _a (self ):
        A_ : Optional[Any] = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        A_ : Optional[Any] = self.dummy_input
        return init_dict, inputs_dict

    def _a (self ):
        A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Dict = 'mid'

    def _a (self ):
        A_ : Optional[Any] = {
            """in_channels""": 32,
            """temb_channels""": 128,
        }
        A_ : Any = self.dummy_input
        return init_dict, inputs_dict

    def _a (self ):
        A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn  # noqa F405
    __SCREAMING_SNAKE_CASE : Optional[int] = 'mid'

    def _a (self ):
        A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
        A_ : List[str] = 32
        return init_dict, inputs_dict

    def _a (self ):
        A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    __SCREAMING_SNAKE_CASE : List[Any] = 'mid'

    @property
    def _a (self ):
        return super().get_dummy_input(include_encoder_hidden_states=lowercase )

    def _a (self ):
        A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
        A_ : Optional[int] = 32
        return init_dict, inputs_dict

    def _a (self ):
        A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : str = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )

    def _a (self ):
        A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Dict = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )

    def _a (self ):
        A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Any = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )

    def _a (self ):
        A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
        A_ : Union[str, Any] = 32
        return init_dict, inputs_dict

    def _a (self ):
        A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Tuple = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )

    def _a (self ):
        A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
        A_ : int = 32
        return init_dict, inputs_dict

    def _a (self ):
        A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : List[str] = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )

    @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
    def _a (self ):
        A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Tuple = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )

    def _a (self ):
        A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : List[Any] = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )

    def _a (self ):
        A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : str = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_temb=lowercase )

    def _a (self ):
        A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
        A_ : Optional[int] = self.dummy_input
        return init_dict, inputs_dict

    def _a (self ):
        A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(lowercase )


class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD  # noqa F405
    __SCREAMING_SNAKE_CASE : Dict = 'up'

    @property
    def _a (self ):
        return super().get_dummy_input(include_temb=lowercase )

    def _a (self ):
        A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
        A_ : Optional[Any] = self.dummy_input
        return init_dict, inputs_dict

    def _a (self ):
        A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
'''simple docstring'''
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def a ( ):
    '''simple docstring'''
    A_, A_ : Any = 9, 14  # noqa: F841
    A_ : int = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    A_ : Optional[Any] = defaultdict(lowerCamelCase__ )
    for nodea, nodea, cost in edges:
        adjancency[nodea].append([nodea, cost] )
        adjancency[nodea].append([nodea, cost] )
    A_ : str = mst(lowerCamelCase__ )
    A_ : Union[str, Any] = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        A_ : Optional[int] = tuple(answer[:2] )
        A_ : List[str] = tuple(edge[::-1] )
        assert edge in result or reverse in result
'''simple docstring'''
from __future__ import annotations


def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
    '''simple docstring'''
    A_ : List[Any] = word_bank or []
    # create a table
    A_ : int = len(lowerCamelCase__ ) + 1
    A_ : list[list[list[str]]] = []
    for _ in range(lowerCamelCase__ ):
        table.append([] )
    # seed value
    A_ : Any = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(lowerCamelCase__ ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(lowerCamelCase__ )] == word:
                    A_ : list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]  # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(lowerCamelCase__ )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(lowerCamelCase__ )]:
        combination.reverse()
    return table[len(lowerCamelCase__ )]


if __name__ == "__main__":
    print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
    print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
    print(
        all_construct(
            '''hexagonosaurus''',
            ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
        )
    )
'''simple docstring'''
from __future__ import annotations

from math import pi


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if inductance < 0:
        raise ValueError("""Inductance cannot be negative""" )
    if frequency < 0:
        raise ValueError("""Frequency cannot be negative""" )
    if reactance < 0:
        raise ValueError("""Inductive reactance cannot be negative""" )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("""Exactly one argument must be 0""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : int = []
    A_ : int = set({"""(""", """[""", """{"""} )
    A_ : Union[str, Any] = set({""")""", """]""", """}"""} )
    A_ : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(lowerCamelCase__ ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(lowerCamelCase__ ) == 0 or (len(lowerCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(lowerCamelCase__ ) == 0


def a ( ):
    '''simple docstring'''
    A_ : int = input("""Enter sequence of brackets: """ )
    if is_balanced(lowerCamelCase__ ):
        print(lowerCamelCase__ , """is balanced""" )
    else:
        print(lowerCamelCase__ , """is not balanced""" )


if __name__ == "__main__":
    main()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

lowerCamelCase :Dict = logging.get_logger(__name__)

lowerCamelCase :Any = {
    '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}


class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = 'resnet'
    __SCREAMING_SNAKE_CASE : List[str] = ['basic', 'bottleneck']

    def __init__(self , lowercase=3 , lowercase=64 , lowercase=[256, 512, 1024, 2048] , lowercase=[3, 4, 6, 3] , lowercase="bottleneck" , lowercase="relu" , lowercase=False , lowercase=None , lowercase=None , **lowercase , ):
        super().__init__(**lowercase )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        A_ : Union[str, Any] = num_channels
        A_ : Any = embedding_size
        A_ : Dict = hidden_sizes
        A_ : Tuple = depths
        A_ : Any = layer_type
        A_ : List[str] = hidden_act
        A_ : Union[str, Any] = downsample_in_first_stage
        A_ : int = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(lowercase ) + 1 )]
        A_, A_ : Optional[Any] = get_aligned_output_features_output_indices(
            out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Any = version.parse('1.11' )

    @property
    def _a (self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def _a (self ):
        return 1E-3
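# --- Editor's usage sketch (not part of the original sample) ---
# Judging by the 'microsoft/resnet-50' archive map, the masked classes above
# appear to correspond to transformers' ResNetConfig and its ONNX config.
# Assuming that mapping, the public (un-masked) API is exercised like this;
# the names below come from the released transformers package, not this file.
#
#     from transformers import ResNetConfig, ResNetModel
#
#     config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
#     model = ResNetModel(config)  # randomly initialised model matching the config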
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
        super().__init__(
            lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
        A_ : Optional[int] = field
        A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
        A_ : Optional[Any] = Json(
            cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )

    def _a (self ):
        # Build iterable dataset
        if self.streaming:
            A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            A_ : int = None
            A_ : Union[str, Any] = None
            A_ : int = None
            A_ : List[str] = None
            self.builder.download_and_prepare(
                download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
            A_ : str = self.builder.as_dataset(
                split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
        return dataset


class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
        A_ : Any = dataset
        A_ : List[str] = path_or_buf
        A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        A_ : Optional[Any] = num_proc
        A_ : List[Any] = """utf-8"""
        A_ : int = to_json_kwargs

    def _a (self ):
        A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
        A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
        A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
                A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            A_ : Union[str, Any] = self._write(
                file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
        return written

    def _a (self , lowercase ):
        A_, A_, A_, A_, A_ : List[str] = args
        A_ : List[str] = query_table(
            table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
        A_ : Any = batch.to_pandas().to_json(
            path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
        A_ : Dict = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) ,
                unit="""ba""" ,
                disable=not logging.is_progress_bar_enabled() ,
                desc="""Creating json from Arrow format""" ,
            ):
                A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(lowercase )
        else:
            A_, A_ : Tuple = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json ,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] ,
                    ) ,
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,
                    unit="""ba""" ,
                    disable=not logging.is_progress_bar_enabled() ,
                    desc="""Creating json from Arrow format""" ,
                ):
                    written += file_obj.write(lowercase )
        return written
'''simple docstring'''
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig

lowerCamelCase :List[Any] = logging.get_logger(__name__)

# General docstring
lowerCamelCase :Optional[Any] = '''ResNetConfig'''

# Base docstring
lowerCamelCase :Union[str, Any] = '''microsoft/resnet-50'''
lowerCamelCase :int = [1, 2_0_4_8, 7, 7]

# Image classification docstring
lowerCamelCase :Dict = '''microsoft/resnet-50'''
lowerCamelCase :int = '''tiger cat'''

lowerCamelCase :int = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase , lowercase , lowercase = 3 , lowercase = 1 , lowercase = "relu" ):
        super().__init__()
        A_ : Optional[int] = nn.Convad(
            lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
        A_ : Union[str, Any] = nn.BatchNormad(lowercase )
        A_ : Any = ACTaFN[activation] if activation is not None else nn.Identity()

    def _a (self , lowercase ):
        A_ : Optional[int] = self.convolution(lowercase )
        A_ : Dict = self.normalization(lowercase )
        A_ : int = self.activation(lowercase )
        return hidden_state


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase ):
        super().__init__()
        A_ : Union[str, Any] = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        A_ : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
        A_ : int = config.num_channels

    def _a (self , lowercase ):
        A_ : Union[str, Any] = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        A_ : int = self.embedder(lowercase )
        A_ : int = self.pooler(lowercase )
        return embedding


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase , lowercase , lowercase = 2 ):
        super().__init__()
        A_ : Any = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
        A_ : Union[str, Any] = nn.BatchNormad(lowercase )

    def _a (self , lowercase ):
        A_ : Tuple = self.convolution(lowercase )
        A_ : Optional[int] = self.normalization(lowercase )
        return hidden_state


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase , lowercase , lowercase = 1 , lowercase = "relu" ):
        super().__init__()
        A_ : List[Any] = in_channels != out_channels or stride != 1
        A_ : List[Any] = (
            ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity() )
        A_ : Optional[Any] = nn.Sequential(
            ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
        A_ : Optional[int] = ACTaFN[activation]

    def _a (self , lowercase ):
        A_ : Any = hidden_state
        A_ : Dict = self.layer(lowercase )
        A_ : Tuple = self.shortcut(lowercase )
        hidden_state += residual
        A_ : List[str] = self.activation(lowercase )
        return hidden_state


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase , lowercase , lowercase = 1 , lowercase = "relu" , lowercase = 4 ):
        super().__init__()
        A_ : Union[str, Any] = in_channels != out_channels or stride != 1
        A_ : Tuple = out_channels // reduction
        A_ : Any = (
            ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity() )
        A_ : List[str] = nn.Sequential(
            ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
        A_ : List[str] = ACTaFN[activation]

    def _a (self , lowercase ):
        A_ : Tuple = hidden_state
        A_ : Tuple = self.layer(lowercase )
        A_ : Any = self.shortcut(lowercase )
        hidden_state += residual
        A_ : Optional[int] = self.activation(lowercase )
        return hidden_state


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , ):
        super().__init__()
        A_ : Dict = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
        A_ : Tuple = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) ,
            *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def _a (self , lowercase ):
        A_ : Union[str, Any] = input
        for layer in self.layers:
            A_ : List[str] = layer(lowercase )
        return hidden_state


class _lowerCAmelCase ( nn.Module ):
    def __init__(self , lowercase ):
        super().__init__()
        A_ : List[str] = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                lowercase ,
                config.embedding_size ,
                config.hidden_sizes[0] ,
                stride=2 if config.downsample_in_first_stage else 1 ,
                depth=config.depths[0] , ) )
        A_ : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
            self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )

    def _a (self , lowercase , lowercase = False , lowercase = True ):
        A_ : Union[str, Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                A_ : Union[str, Any] = hidden_states + (hidden_state,)
            A_ : List[str] = stage_module(lowercase )
        if output_hidden_states:
            A_ : List[Any] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=lowercase , hidden_states=lowercase , )


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Dict = ResNetConfig
    __SCREAMING_SNAKE_CASE : Optional[Any] = 'resnet'
    __SCREAMING_SNAKE_CASE : Tuple = 'pixel_values'
    __SCREAMING_SNAKE_CASE : Tuple = True

    def _a (self , lowercase ):
        if isinstance(lowercase , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
        elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _a (self , lowercase , lowercase=False ):
        if isinstance(lowercase , lowercase ):
            A_ : Dict = value


lowerCamelCase :Optional[Any] = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with
            a config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

lowerCamelCase :Union[str, Any] = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.' ,
    __UpperCAmelCase ,
)
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , lowercase ):
        super().__init__(lowercase )
        A_ : List[Any] = config
        A_ : Dict = ResNetEmbeddings(lowercase )
        A_ : str = ResNetEncoder(lowercase )
        A_ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,
        output_type=lowercase ,
        config_class=_CONFIG_FOR_DOC ,
        modality="""vision""" ,
        expected_output=_EXPECTED_OUTPUT_SHAPE ,
    )
    def _a (self , lowercase , lowercase = None , lowercase = None ):
        A_ : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states )
        A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : Optional[Any] = self.embedder(lowercase )
        A_ : Union[str, Any] = self.encoder(
            lowercase , output_hidden_states=lowercase , return_dict=lowercase )
        A_ : int = encoder_outputs[0]
        A_ : Any = self.pooler(lowercase )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase ,
            pooler_output=lowercase ,
            hidden_states=encoder_outputs.hidden_states , )


@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' ,
    __UpperCAmelCase ,
)
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , lowercase ):
        super().__init__(lowercase )
        A_ : Union[str, Any] = config.num_labels
        A_ : Tuple = ResNetModel(lowercase )
        # classification head
        A_ : List[str] = nn.Sequential(
            nn.Flatten() ,
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,
        output_type=lowercase ,
        config_class=_CONFIG_FOR_DOC ,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,
    )
    def _a (self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ):
        A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : Dict = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
        A_ : List[Any] = outputs.pooler_output if return_dict else outputs[1]
        A_ : str = self.classifier(lowercase )
        A_ : Union[str, Any] = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A_ : List[Any] = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A_ : Optional[int] = """single_label_classification"""
                else:
                    A_ : List[Any] = """multi_label_classification"""
            if self.config.problem_type == "regression":
                A_ : Union[str, Any] = MSELoss()
                if self.num_labels == 1:
                    A_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A_ : Tuple = loss_fct(lowercase , lowercase )
            elif self.config.problem_type == "single_label_classification":
                A_ : str = CrossEntropyLoss()
                A_ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A_ : List[Any] = BCEWithLogitsLoss()
                A_ : List[str] = loss_fct(lowercase , lowercase )
        if not return_dict:
            A_ : Tuple = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )


@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ' ,
    __UpperCAmelCase ,
)
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
    def __init__(self , lowercase ):
        super().__init__(lowercase )
        super()._init_backbone(lowercase )
        A_ : Optional[int] = [config.embedding_size] + config.hidden_sizes
        A_ : Union[str, Any] = ResNetEmbeddings(lowercase )
        A_ : Optional[Any] = ResNetEncoder(lowercase )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase )
    @replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
    def _a (self , lowercase , lowercase = None , lowercase = None ):
        A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : List[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states )
        A_ : int = self.embedder(lowercase )
        A_ : List[Any] = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
        A_ : int = outputs.hidden_states
        A_ : str = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            A_ : List[Any] = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=lowercase ,
            hidden_states=outputs.hidden_states if output_hidden_states else None ,
            attentions=lowercase , )
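# --- Editor's usage sketch (not part of the original sample) ---
# The masked model classes above appear to mirror transformers' ResNet models
# (note the 'microsoft/resnet-50' docstring checkpoint). Assuming that mapping,
# the released API is driven like this; the names below are from the public
# transformers package, not from this masked file.
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is any PIL image
#     logits = model(**inputs).logits
#     label = model.config.id2label[logits.argmax(-1).item()]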
'''simple docstring'''
import os
import sys
import unittest

lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')


class _lowerCAmelCase ( unittest.TestCase ):
    def _a (self ):
        A_ : Tuple = get_test_to_tester_mapping(lowercase )
        A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
        A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
        A_ : Union[str, Any] = {
            """BlipModelTest""": """BlipModelTester""",
            """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
            """BlipTextModelTest""": """BlipTextModelTester""",
            """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
            """BlipVQAModelTest""": """BlipVQAModelTester""",
            """BlipVisionModelTest""": """BlipVisionModelTester""",
        }
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )

    def _a (self ):
        A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
        A_ : List[str] = get_model_to_test_mapping(lowercase )
        A_ : Dict = {
            """BertForMaskedLM""": ["""BertModelTest"""],
            """BertForMultipleChoice""": ["""BertModelTest"""],
            """BertForNextSentencePrediction""": ["""BertModelTest"""],
            """BertForPreTraining""": ["""BertModelTest"""],
            """BertForQuestionAnswering""": ["""BertModelTest"""],
            """BertForSequenceClassification""": ["""BertModelTest"""],
            """BertForTokenClassification""": ["""BertModelTest"""],
            """BertLMHeadModel""": ["""BertModelTest"""],
            """BertModel""": ["""BertModelTest"""],
        }
        A_ : Any = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
            """BlipModel""": ["""BlipModelTest"""],
            """BlipTextModel""": ["""BlipTextModelTest"""],
            """BlipVisionModel""": ["""BlipVisionModelTest"""],
        }
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )

    def _a (self ):
        A_ : List[Any] = get_model_to_tester_mapping(lowercase )
        A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
        A_ : Dict = {
            """BertForMaskedLM""": ["""BertModelTester"""],
            """BertForMultipleChoice""": ["""BertModelTester"""],
            """BertForNextSentencePrediction""": ["""BertModelTester"""],
            """BertForPreTraining""": ["""BertModelTester"""],
            """BertForQuestionAnswering""": ["""BertModelTester"""],
            """BertForSequenceClassification""": ["""BertModelTester"""],
            """BertForTokenClassification""": ["""BertModelTester"""],
            """BertLMHeadModel""": ["""BertModelTester"""],
            """BertModel""": ["""BertModelTester"""],
        }
        A_ : Dict = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
            """BlipModel""": ["""BlipModelTester"""],
            """BlipTextModel""": ["""BlipTextModelTester"""],
            """BlipVisionModel""": ["""BlipVisionModelTester"""],
        }
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
        self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

lowerCamelCase :Any = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :Optional[Any] = [
        '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongT5EncoderModel''',
        '''LongT5ForConditionalGeneration''',
        '''LongT5Model''',
        '''LongT5PreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :Optional[Any] = [
        '''FlaxLongT5ForConditionalGeneration''',
        '''FlaxLongT5Model''',
        '''FlaxLongT5PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longta import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongTaEncoderModel,
            LongTaForConditionalGeneration,
            LongTaModel,
            LongTaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longta import (
            FlaxLongTaForConditionalGeneration,
            FlaxLongTaModel,
            FlaxLongTaPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
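# --- Editor's usage sketch (not part of the original sample) ---
# This file follows transformers' lazy-module __init__ pattern: the package
# import stays cheap, and the heavy submodule is only imported on first
# attribute access. Assuming this is the longt5 package __init__ (inferred
# from the archive-map names, so an assumption), the effect looks like this:
#
#     import transformers.models.longt5 as longt5   # fast, nothing loaded yet
#     config_cls = longt5.LongT5Config              # triggers the real import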
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :List[Any] = logging.get_logger(__name__) lowerCamelCase :int = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] lowerCamelCase :List[str] = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = torch.load(lowerCamelCase__ , map_location="""cpu""" ) return sd def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=rename_keys_prefix ): '''simple docstring''' A_ : str = OrderedDict() A_ : Tuple = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A_ : str = key for name_pair in rename_keys_prefix: A_ : Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) A_ : Tuple = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A_ : str = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' assert ( checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS ), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.' # Get Config if "pre" in checkpoint_path: A_ : Tuple = """pretraining""" if "vcr" in checkpoint_path: A_ : List[str] = {"""visual_embedding_dim""": 5_12} elif "vqa_advanced" in checkpoint_path: A_ : str = {"""visual_embedding_dim""": 20_48} elif "vqa" in checkpoint_path: A_ : int = {"""visual_embedding_dim""": 20_48} elif "nlvr" in checkpoint_path: A_ : Any = {"""visual_embedding_dim""": 10_24} else: raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' 
) else: if "vcr" in checkpoint_path: A_ : Tuple = {"""visual_embedding_dim""": 5_12} A_ : Optional[int] = """multichoice""" elif "vqa_advanced" in checkpoint_path: A_ : List[Any] = {"""visual_embedding_dim""": 20_48} A_ : Any = """vqa_advanced""" elif "vqa" in checkpoint_path: A_ : Dict = {"""visual_embedding_dim""": 20_48, """num_labels""": 31_29} A_ : Tuple = """vqa""" elif "nlvr" in checkpoint_path: A_ : List[Any] = { """visual_embedding_dim""": 10_24, """num_labels""": 2, } A_ : Optional[Any] = """nlvr""" A_ : Union[str, Any] = VisualBertConfig(**lowerCamelCase__ ) # Load State Dict A_ : List[str] = load_state_dict(lowerCamelCase__ ) A_ : int = get_new_dict(lowerCamelCase__ , lowerCamelCase__ ) if model_type == "pretraining": A_ : List[Any] = VisualBertForPreTraining(lowerCamelCase__ ) elif model_type == "vqa": A_ : List[str] = VisualBertForQuestionAnswering(lowerCamelCase__ ) elif model_type == "nlvr": A_ : Optional[int] = VisualBertForVisualReasoning(lowerCamelCase__ ) elif model_type == "multichoice": A_ : List[Any] = VisualBertForMultipleChoice(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) # Save Checkpoints Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') lowerCamelCase :Union[str, Any] = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
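# Hedged CLI sketch for the conversion script above (the script filename is
# illustrative). The checkpoint name must be one of ACCEPTABLE_CHECKPOINTS,
# and substrings like "pre"/"vqa"/"nlvr" in it select the model class:
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       ./vqa_fine_tuned.th \
#       ./visual_bert_vqa
#
# Here "vqa" (without "pre") selects VisualBertForQuestionAnswering with
# visual_embedding_dim=2048 and num_labels=3129.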
'''simple docstring'''

import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
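# Hedged usage sketch for the check above; the script name and paths are
# illustrative.
#
#   python check_build.py --check_lib   # verify the installed `transformers` package
#   python check_build.py               # verify ./build/lib/transformers from a source build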
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , *lowercase , **lowercase ): super().__init__(*lowercase , **lowercase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _a (self , lowercase=None ): A_ : List[Any] = {} if top_k is not None: A_ : List[str] = top_k return {}, {}, postprocess_params def __call__(self , lowercase , **lowercase ): return super().__call__(lowercase , **lowercase ) def _a (self , lowercase ): A_ : Optional[Any] = load_image(lowercase ) A_ : Union[str, Any] = self.image_processor(images=lowercase , return_tensors=self.framework ) return model_inputs def _a (self , lowercase ): A_ : Union[str, Any] = self.model(**lowercase ) return model_outputs def _a (self , lowercase , lowercase=5 ): if top_k > self.model.config.num_labels: A_ : Tuple = self.model.config.num_labels if self.framework == "pt": A_ : Union[str, Any] = model_outputs.logits.softmax(-1 )[0] A_, A_ : List[str] = probs.topk(lowercase ) elif self.framework == "tf": A_ : int = stable_softmax(model_outputs.logits , axis=-1 )[0] A_ : Union[str, Any] = tf.math.top_k(lowercase , k=lowercase ) A_, A_ : Dict = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F'Unsupported framework: {self.framework}' ) A_ : Optional[Any] = scores.tolist() A_ : Dict = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
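# Hedged usage sketch via the public factory: `pipeline("image-classification")`
# instantiates the pipeline class defined above. The model name and image URL
# are illustrative.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
print(preds)  # list of {"score": float, "label": str}, best first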
'''simple docstring'''

cache: dict[tuple[int, int, int], int] = {}


def _calculate(days, absent, late):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days=30):
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
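# Hedged sanity check for the memoized recursion above: Project Euler 191
# reports 43 prize strings over a 4-day period.
assert _calculate(4, absent=0, late=0) == 43
print(solution(30))  # the 30-day count asked for by the problem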
'''simple docstring'''

import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path):
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs):
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src, dst):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock():
    '''simple docstring'''
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
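# Hedged demo of the URI helpers above; the s3 path is illustrative.
import fsspec

assert extract_path_from_uri("s3://my-bucket/data/train.jsonl") == "my-bucket/data/train.jsonl"
assert extract_path_from_uri("/local/path") == "/local/path"
# With the fsspec versions this module targets, LocalFileSystem.protocol is
# "file", so a local filesystem is reported as not remote.
print(is_remote_filesystem(fsspec.filesystem("file")))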
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Tuple = 'linear' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial' __SCREAMING_SNAKE_CASE : Optional[int] = 'constant' __SCREAMING_SNAKE_CASE : str = 'constant_with_warmup' __SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant' def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) ) return 1.0 return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' A_ : Optional[Any] = {} A_ : Optional[Any] = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A_, A_ : Union[str, Any] = rule_str.split(""":""" ) A_ : Union[str, Any] = int(lowerCamelCase__ ) A_ : List[Any] = float(lowerCamelCase__ ) A_ : Union[str, Any] = value A_ : Optional[int] = float(rule_list[-1] ) def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ): def rule_func(lowerCamelCase__ ) -> float: A_ : str = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowerCamelCase__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) 
% 1.0) )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ): '''simple docstring''' A_ : Optional[Any] = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' ) def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A_ : str = lr_init - lr_end A_ : Tuple = num_training_steps - num_warmup_steps A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps A_ : Optional[int] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase :List[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ): '''simple docstring''' A_ : Optional[Any] = SchedulerType(lowerCamelCase__ ) A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
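# Hedged usage sketch: in the released diffusers library this module's entry
# point is exported as `diffusers.optimization.get_scheduler`. The optimizer
# and step counts below are illustrative.
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler(
    "cosine", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100
)
for _ in range(100):
    optimizer.step()     # training step (loss/backward omitted)
    lr_scheduler.step()  # linear warmup for 10 steps, then cosine decay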
'''simple docstring'''

import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_code, dataset_loading_script_name, tmp_path):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''') lowerCamelCase :int = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowerCamelCase :List[str] = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowerCamelCase :Union[str, Any] = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowerCamelCase :Dict = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowerCamelCase :int = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowerCamelCase :str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowerCamelCase :List[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowerCamelCase :Tuple = [] lowerCamelCase :Dict = [] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for attribute in key.split(""".""" ): A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Any = 
hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Dict = value elif weight_type == "bias": A_ : Dict = value elif weight_type == "running_mean": A_ : Optional[Any] = value elif weight_type == "running_var": A_ : int = value elif weight_type == "num_batches_tracked": A_ : Optional[Any] = value elif weight_type == "weight_ih_l0": A_ : Optional[int] = value elif weight_type == "weight_hh_l0": A_ : Union[str, Any] = value elif weight_type == "bias_ih_l0": A_ : Optional[int] = value elif weight_type == "bias_hh_l0": A_ : Tuple = value elif weight_type == "weight_ih_l1": A_ : Optional[int] = value elif weight_type == "weight_hh_l1": A_ : Dict = value elif weight_type == "bias_ih_l1": A_ : Optional[int] = value elif weight_type == "bias_hh_l1": A_ : Tuple = value else: A_ : Any = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_, A_ : List[str] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": A_ : List[str] = MAPPING_24K elif model_name == "encodec_48khz": A_ : str = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f'{name} was ignored' ) continue A_ : str = False for key, mapped_key in MAPPING.items(): if "*" in key: A_, A_ : List[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: A_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue A_ : Union[str, Any] = True if "*" in mapped_key: A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: A_ : Any = """weight_g""" elif "weight_v" in name: A_ : Tuple = """weight_v""" elif "weight_ih_l0" in name: A_ : Union[str, Any] = """weight_ih_l0""" elif "weight_hh_l0" in name: A_ : Tuple = """weight_hh_l0""" elif "bias_ih_l0" in name: A_ : str = """bias_ih_l0""" elif "bias_hh_l0" in name: A_ : List[Any] = """bias_hh_l0""" elif "weight_ih_l1" in name: A_ : Dict = """weight_ih_l1""" elif "weight_hh_l1" in name: A_ : Any = """weight_hh_l1""" elif "bias_ih_l1" in name: A_ : Optional[int] = """bias_ih_l1""" elif "bias_hh_l1" in name: A_ : List[Any] = """bias_hh_l1""" elif "bias" in name: A_ : List[str] = """bias""" elif "weight" in name: A_ : Optional[int] = """weight""" elif "running_mean" in name: A_ : Union[str, Any] = """running_mean""" elif "running_var" in name: A_ : Optional[int] = """running_var""" elif "num_batches_tracked" in name: A_ : List[Any] = """num_batches_tracked""" else: A_ : str = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue 
if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if config_path is not None: A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Optional[int] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A_ : Dict = [8, 5, 4, 4] A_ : Optional[Any] = [2.2] A_ : Tuple = 64 A_ : Tuple = 3_20_00 A_ : List[Any] = 20_48 A_ : Optional[Any] = False A_ : str = False A_ : Optional[int] = False elif model_name == "encodec_48khz": A_ : Dict = [8, 5, 4, 2] A_ : Tuple = [3.0, 6.0, 12.0, 24.0] A_ : List[Any] = 4_80_00 A_ : Dict = 2 A_ : Dict = False A_ : Dict = """time_group_norm""" A_ : Optional[Any] = True A_ : str = 1.0 A_ : Any = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : Dict = EncodecModel(lowerCamelCase__ ) A_ : Any = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase__ ) A_ : int = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A_ : Tuple = original_checkpoint["""best_state"""] recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCamelCase :Dict = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
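# Hedged CLI sketch for the EnCodec conversion script above; the script
# filename and paths are illustrative, and the checkpoint must match the
# chosen --model variant.
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf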
'''simple docstring'''

from statistics import mean, stdev


def normalization(data, ndigits=3):
    '''simple docstring'''
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data, ndigits=3):
    '''simple docstring'''
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
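# Hedged demo of the two rescaling helpers above.
data = [2.0, 4.0, 6.0, 8.0]
print(normalization(data))    # min-max scaled into [0, 1]
print(standardization(data))  # zero mean, unit sample standard deviation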
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :Any = logging.get_logger(__name__) lowerCamelCase :Any = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = 'beit' def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ): super().__init__(**lowercase ) A_ : Union[str, Any] = vocab_size A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Tuple = num_attention_heads A_ : List[Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : str = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Dict = initializer_range A_ : str = layer_norm_eps A_ : Any = image_size A_ : int = patch_size A_ : List[str] = num_channels A_ : Any = use_mask_token A_ : Dict = use_absolute_position_embeddings A_ : List[Any] = use_relative_position_bias A_ : Tuple = use_shared_relative_position_bias A_ : Optional[int] = layer_scale_init_value A_ : Tuple = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : Optional[int] = use_auxiliary_head A_ : Union[str, Any] = auxiliary_loss_weight A_ : Tuple = auxiliary_channels A_ : List[Any] = auxiliary_num_convs A_ : Dict = auxiliary_concat_input A_ : Optional[Any] = semantic_loss_ignore_index class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
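# Hedged usage sketch: in the released library the config class above is
# exported as `transformers.BeitConfig`; the overrides shown are existing
# constructor arguments.
from transformers import BeitConfig

config = BeitConfig(image_size=384, use_relative_position_bias=True)
print(config.hidden_size, config.num_hidden_layers)  # 768 12 (defaults)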
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu lowerCamelCase :Union[str, Any] = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: lowerCamelCase :Any = json.load(f) @require_torch class _lowerCAmelCase ( unittest.TestCase ): def _a (self , lowercase ): return FSMTTokenizer.from_pretrained(lowercase ) def _a (self , lowercase ): A_ : int = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _a (self , lowercase , lowercase ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality A_ : str = F'facebook/wmt19-{pair}' A_ : List[str] = self.get_tokenizer(lowercase ) A_ : List[str] = self.get_model(lowercase ) A_ : str = bleu_data[pair]["""src"""] A_ : Optional[Any] = bleu_data[pair]["""tgt"""] A_ : Dict = tokenizer(lowercase , return_tensors="""pt""" , truncation=lowercase , padding="""longest""" ).to(lowercase ) A_ : Dict = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A_ : List[Any] = tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) A_ : int = calculate_bleu(lowercase , lowercase ) print(lowercase ) self.assertGreaterEqual(scores["""bleu"""] , lowercase )
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel lowerCamelCase :Optional[int] = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 1_3_1_0_7_2, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, } def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2 def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2 A_ : List[Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ ) class _lowerCAmelCase ( __UpperCAmelCase ): pass class _lowerCAmelCase ( nn.Module ): def __init__(self , lowercase ): super().__init__() A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 ) A_ : str = deepcopy(self.diffusion ) A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = MODELS_MAP[model_name]["""url"""] os.system(f'wget {url} ./' ) return f'./{model_name}.ckpt' lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } lowerCamelCase :str = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } lowerCamelCase :int = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } lowerCamelCase :List[Any] = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } lowerCamelCase :Optional[Any] = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def a ( lowerCamelCase__ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""" , 
RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'ResConvBlock error with {name}' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def a ( lowerCamelCase__ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ): return name.replace(lowerCamelCase__ , lowerCamelCase__ ) elif name.startswith(lowerCamelCase__ ): return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value] raise ValueError(f'Attn error with {name}' ) def a ( lowerCamelCase__ , lowerCamelCase__=13 ): '''simple docstring''' A_ : Union[str, Any] = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) A_ : Dict = 0 if string.startswith("""net.3.""" ): depth += 1 A_ : int = string[6:] elif string.startswith("""net.""" ): A_ : Tuple = string[4:] while string.startswith("""main.7.""" ): depth += 1 A_ : Dict = string[7:] if string.startswith("""main.""" ): A_ : Union[str, Any] = string[5:] # mid block if string[:2].isdigit(): A_ : Optional[Any] = string[:2] A_ : Optional[Any] = string[2:] else: A_ : List[Any] = string[0] A_ : Dict = string[1:] if depth == max_depth: A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num] A_ : Optional[Any] = """mid_block""" elif depth > 0 and int(lowerCamelCase__ ) < 7: A_ : Any = DOWN_NUM_TO_LAYER[layer_num] A_ : Union[str, Any] = f'down_blocks.{depth}' elif depth > 0 and int(lowerCamelCase__ ) > 7: A_ : List[str] = UP_NUM_TO_LAYER[layer_num] A_ : List[str] = f'up_blocks.{max_depth - depth - 1}' elif depth == 0: A_ : str = DEPTH_0_TO_LAYER[layer_num] A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' 
) A_ : Optional[int] = string_left[1:] if "resnets" in new_layer: A_ : Tuple = convert_resconv_naming(lowerCamelCase__ ) elif "attentions" in new_layer: A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ ) A_ : Dict = new_string_left if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left else: A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue A_ : List[Any] = rename(lowerCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: A_ : int = v return new_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if len(lowerCamelCase__ ) == 1: if len(v.shape ) == 3: # weight A_ : Optional[Any] = v[:, :, 0] else: # bias A_ : Union[str, Any] = v else: # qkv matrices A_ : Optional[int] = v.shape[0] A_ : str = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0] else: A_ : str = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}' A_ : int = download(lowerCamelCase__ ) A_ : Any = MODELS_MAP[model_name]["""sample_rate"""] A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""] A_ : Tuple = Object() A_ : Union[str, Any] = sample_size A_ : Tuple = sample_rate A_ : int = 0 A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ ) A_ : Optional[Any] = diffusers_model.state_dict() A_ : Dict = DiffusionUncond(lowerCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] ) A_ : Any = orig_model.diffusion_ema.eval() A_ : Any = orig_model.state_dict() A_ : List[str] = rename_orig_weights(lowerCamelCase__ ) A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}' assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}' if key == "time_proj.weight": A_ : str = value.squeeze() A_ : Union[str, Any] = value diffusers_model.load_state_dict(lowerCamelCase__ ) A_ : Optional[Any] = 1_00 A_ : Union[str, Any] = 33 A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ ) A_ : List[str] = torch.manual_seed(lowerCamelCase__ ) A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ ) A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1] A_ : List[Any] = get_crash_schedule(lowerCamelCase__ ) A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) A_ : str = torch.manual_seed(33 ) A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} ) A_ : str = generated.clamp(-1 , 1 ) A_ : List[Any] = (generated - audio).abs().sum() A_ : int = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , lowerCamelCase__ ) print("""Diff max""" , lowerCamelCase__ ) assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/' print(f'Conversion for {model_name} successful!' ) if __name__ == "__main__": lowerCamelCase :int = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCamelCase :List[str] = parser.parse_args() main(args)
'''simple docstring'''


def is_balanced(s):
    '''simple docstring'''
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    '''simple docstring'''
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
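# Hedged demo of the stack-based check above.
assert is_balanced("([]{})")
assert not is_balanced("([)]")  # interleaved brackets are rejected
assert not is_balanced("(((")   # unclosed openers leave the stack non-empty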
'''simple docstring'''

from math import factorial


def binomial_distribution(successes, trials, prob):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Dict = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Union[str, Any] = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[str] = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Tuple = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Union[str, Any] = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, 
TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''

import re


def dna(dna: str) -> str:
    '''simple docstring'''
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
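# Hedged demo of the complement helper above.
print(dna("GCTA"))  # -> CGAT
assert dna(dna("ATCG")) == "ATCG"  # complementing twice returns the original strand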
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): def _a (self , lowercase ): if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()] return labels def __call__(self , lowercase , lowercase , lowercase ): if len(lowercase ) == 0 or len(lowercase ) == 0: raise ValueError("""You must include at least one label and at least one sequence.""" ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """ """Make sure the passed template includes formatting syntax such as {{}} where the label should go.""" ).format(lowercase ) ) if isinstance(lowercase , lowercase ): A_ : Tuple = [sequences] A_ : int = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ): A_ : int = args_parser super().__init__(*lowercase , **lowercase ) if self.entailment_id == -1: logger.warning( """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """ """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" ) @property def _a (self ): for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("""entail""" ): return ind return -1 def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ): A_ : Any = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """ """ `pad_token=eos_token`""" ) A_ : str = self.tokenizer.eos_token try: A_ : str = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , ) except Exception as e: if "too short" in str(lowercase ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. A_ : Any = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def _a (self , **lowercase ): if kwargs.get("""multi_class""" , lowercase ) is not None: A_ : Tuple = kwargs["""multi_class"""] logger.warning( """The `multi_class` argument has been deprecated and renamed to `multi_label`. 
""" """`multi_class` will be removed in a future version of Transformers.""" ) A_ : Optional[Any] = {} if "candidate_labels" in kwargs: A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] ) if "hypothesis_template" in kwargs: A_ : List[str] = kwargs["""hypothesis_template"""] A_ : List[Any] = {} if "multi_label" in kwargs: A_ : Optional[Any] = kwargs["""multi_label"""] return preprocess_params, {}, postprocess_params def __call__(self , lowercase , *lowercase , **lowercase , ): if len(lowercase ) == 0: pass elif len(lowercase ) == 1 and "candidate_labels" not in kwargs: A_ : Union[str, Any] = args[0] else: raise ValueError(F'Unable to understand extra arguments {args}' ) return super().__call__(lowercase , **lowercase ) def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ): A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ): A_ : List[Any] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowercase ) - 1, **model_input, } def _a (self , lowercase ): A_ : Optional[Any] = inputs["""candidate_label"""] A_ : List[Any] = inputs["""sequence"""] A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names} A_ : List[str] = self.model(**lowercase ) A_ : str = { """candidate_label""": candidate_label, """sequence""": sequence, """is_last""": inputs["""is_last"""], **outputs, } return model_outputs def _a (self , lowercase , lowercase=False ): A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs] A_ : str = [outputs["""sequence"""] for outputs in model_outputs] A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] ) A_ : Dict = logits.shape[0] A_ : Any = len(lowercase ) A_ : List[str] = N // n A_ : Tuple = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowercase ) == 1: # softmax over the entailment vs. contradiction dim for each label independently A_ : Union[str, Any] = self.entailment_id A_ : Any = -1 if entailment_id == 0 else 0 A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]] A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Optional[Any] = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels A_ : Optional[int] = reshaped_outputs[..., self.entailment_id] A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Any = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
'''simple docstring'''
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
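For reference, the two network failure modes these tests distinguish can be reproduced with plain requests; the unroutable address below is purely illustrative.

import requests

try:
    # 10.255.255.1 is typically unroutable, so the TCP connect stalls
    requests.request("GET", "https://10.255.255.1", timeout=0.5)
except requests.exceptions.ConnectTimeout:
    # raised when the connect phase exceeds the timeout
    print("timed out while connecting")
except requests.exceptions.ConnectionError:
    # ConnectTimeout subclasses ConnectionError, so this handler must come second
    print("connection failed outright")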
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _lowerCAmelCase : def __init__(self , lowercase ): if isinstance(lowercase , lowercase ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden A_ : Union[str, Any] = deepcopy(lowercase ) elif os.path.exists(lowercase ): with io.open(lowercase , """r""" , encoding="""utf-8""" ) as f: A_ : Tuple = json.load(lowercase ) else: try: A_ : Union[str, Any] = baseaa.urlsafe_baadecode(lowercase ).decode("""utf-8""" ) A_ : Any = json.loads(lowercase ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' ) A_ : int = config self.set_stage_and_offload() def _a (self ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. A_ : List[Any] = self.get_value("""zero_optimization.stage""" , -1 ) # offload A_ : List[str] = False if self.is_zeroa() or self.is_zeroa(): A_ : Tuple = set(["""cpu""", """nvme"""] ) A_ : List[Any] = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: A_ : Tuple = True def _a (self , lowercase ): A_ : Dict = self.config # find the config node of interest if it exists A_ : Tuple = ds_key_long.split(""".""" ) A_ : Optional[Any] = nodes.pop() for node in nodes: A_ : Any = config.get(lowercase ) if config is None: return None, ds_key return config, ds_key def _a (self , lowercase , lowercase=None ): A_, A_ : Dict = self.find_config_node(lowercase ) if config is None: return default return config.get(lowercase , lowercase ) def _a (self , lowercase , lowercase=False ): A_ : int = self.config # find the config node of interest if it exists A_ : int = ds_key_long.split(""".""" ) for node in nodes: A_ : Optional[Any] = config A_ : Tuple = config.get(lowercase ) if config is None: if must_exist: raise ValueError(F'Can\'t find {ds_key_long} entry in the config: {self.config}' ) else: return # if found remove it if parent_config is not None: parent_config.pop(lowercase ) def _a (self , lowercase ): A_ : str = self.get_value(lowercase ) return False if value is None else bool(lowercase ) def _a (self , lowercase ): A_ : List[str] = self.get_value(lowercase ) return False if value is None else not bool(lowercase ) def _a (self ): return self._stage == 2 def _a (self ): return self._stage == 3 def _a (self ): return self._offload class _lowerCAmelCase : def __init__(self , lowercase ): A_ : Dict = engine def _a (self , lowercase , **lowercase ): # runs backpropagation and handles mixed precision self.engine.backward(lowercase , **lowercase ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but 
allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase ): super().__init__(lowercase , device_placement=lowercase , scaler=lowercase ) A_ : Any = hasattr(self.optimizer , """overflow""" ) def _a (self , lowercase=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def _a (self ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def _a (self ): if self.__has_overflow__: return self.optimizer.overflow return False class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase ): super().__init__(lowercase , lowercase ) def _a (self ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _lowerCAmelCase : def __init__(self , lowercase , lowercase=0.0_01 , lowercase=0 , **lowercase ): A_ : List[str] = params A_ : Optional[int] = lr A_ : Union[str, Any] = weight_decay A_ : int = kwargs class _lowerCAmelCase : def __init__(self , lowercase , lowercase=None , lowercase=0 , **lowercase ): A_ : int = optimizer A_ : Tuple = total_num_steps A_ : Tuple = warmup_num_steps A_ : str = kwargs
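The dotted-key lookup that the config class above performs through its `self.find_config_node(...)` and `self.get_value(...)` calls can be restated as a standalone helper; the sample config dict is fabricated for illustration.

def get_value(config: dict, ds_key_long: str, default=None):
    # Walk nested dicts along the dotted path; any missing node yields `default`
    nodes = ds_key_long.split(".")
    leaf = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)


ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(ds_config, "zero_optimization.stage") == 3
assert get_value(ds_config, "zero_optimization.offload_optimizer.device") is None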
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCamelCase :Any = re.compile(R'''\s+''') def a ( lowerCamelCase__ ): '''simple docstring''' return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def a ( lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""] A_ : Optional[int] = example["""content"""].splitlines() for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ): '''simple docstring''' A_ : Any = ["""unit tests""", """test file""", """configuration file"""] A_ : List[str] = example["""content"""].splitlines() A_ : str = 0 A_ : Union[str, Any] = 0 # first test for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test A_ : List[Any] = example["""content"""].count("""\n""" ) A_ : Any = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = ["""def """, """class """, """for """, """while """] A_ : Optional[int] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def a ( lowerCamelCase__ , lowerCamelCase__=4 ): '''simple docstring''' A_ : Tuple = example["""content"""].splitlines() A_ : int = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""] A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ ) return {"ratio": ratio} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = {} results.update(get_hash(lowerCamelCase__ ) ) results.update(line_stats(lowerCamelCase__ ) ) results.update(alpha_stats(lowerCamelCase__ ) ) results.update(char_token_ratio(lowerCamelCase__ ) ) results.update(is_autogenerated(lowerCamelCase__ ) ) results.update(is_config_or_test(lowerCamelCase__ ) ) 
results.update(has_no_keywords(lowerCamelCase__ ) ) results.update(has_few_assignments(lowerCamelCase__ ) ) return results def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def a ( lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """rb""" ) as f_in: with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ ) os.unlink(lowerCamelCase__ ) # Settings lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments) lowerCamelCase :Tuple = parser.parse_args() if args.num_workers is None: lowerCamelCase :Tuple = multiprocessing.cpu_count() lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCamelCase :List[Any] = time.time() lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCamelCase :int = time.time() lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCamelCase :int = set(ds.unique('''hash''')) lowerCamelCase :List[str] = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCamelCase :Dict = time.time() lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCamelCase :List[str] = time.time() lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCamelCase :int = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCamelCase :Tuple = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCamelCase :Tuple = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json") lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
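A toy run of two of the heuristics above on a fabricated record. Note that the standard library call is `hashlib.md5`; the `hashlib.mda` spelling in the file will not resolve at runtime.

import hashlib
import re

example = {"content": "def add(a, b):\n    return a + b\n"}

# Hash the whitespace-stripped content, as get_hash does above
content_hash = hashlib.md5(re.sub(r"\s+", "", example["content"]).encode("utf-8")).hexdigest()

# Per-line length statistics, as line_stats does above
line_lengths = [len(line) for line in example["content"].splitlines()]

print(content_hash[:12], max(line_lengths))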
'''simple docstring'''
from __future__ import annotations


def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return ("stress", tangential_force / area)
    elif tangential_force == 0:
        return ("tangential_force", stress * area)
    else:
        return ("area", tangential_force / stress)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
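Usage sketch: exactly one of the three quantities is passed as 0, and the function reports which quantity it solved for together with its value.

print(shear_stress(stress=25, tangential_force=100, area=0))    # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))  # ('stress', 8.0)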
'''simple docstring'''
import pytest

DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os

import datasets

REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
'''


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
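A hedged sketch of a test consuming the directory fixture; loading a dataset script by local path is the standard datasets flow, and the assertion simply checks a column declared by the dummy builder.

def test_dummy_dataset_loads(dataset_loading_script_dir):
    from datasets import load_dataset

    # The dummy script downloads two small wikiann jsonl files
    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names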
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :Optional[Any] = logging.get_logger(__name__) lowerCamelCase :Tuple = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = 'deit' def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=True , lowercase=16 , **lowercase , ): super().__init__(**lowercase ) A_ : int = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : int = num_attention_heads A_ : List[Any] = intermediate_size A_ : List[Any] = hidden_act A_ : Tuple = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : str = initializer_range A_ : Dict = layer_norm_eps A_ : Optional[int] = image_size A_ : List[str] = patch_size A_ : Optional[Any] = num_channels A_ : Dict = qkv_bias A_ : int = encoder_stride class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
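A quick instantiation sketch, assuming the class above corresponds to transformers' `DeiTConfig`; the arguments shown simply restate the defaults.

from transformers import DeiTConfig

config = DeiTConfig(image_size=224, patch_size=16, num_channels=3)
print(config.hidden_size, config.num_attention_heads)  # 768 12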
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
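The distance helper on its own: a 3-4-5 right triangle gives 5.0.

print(euclidean_distance([0, 0], [3, 4]))  # 5.0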
'''simple docstring'''
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 0 - 1")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
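Worked check of the `__main__` printout: P(X = 2) over 4 trials at p = 0.75 is C(4, 2) * 0.75^2 * 0.25^2 = 6 * 0.5625 * 0.0625 = 0.2109375.

assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12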
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase :int = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :int = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Tuple = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
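A minimal standalone restatement of the lazy-module pattern used above: submodule imports are deferred until an attribute is first accessed. The names here are illustrative, not the transformers implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Import the owning submodule only on first attribute access
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")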
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :int = logging.get_logger(__name__) lowerCamelCase :Tuple = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'yolos' def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ): super().__init__(**lowercase ) A_ : List[Any] = hidden_size A_ : Dict = num_hidden_layers A_ : Any = num_attention_heads A_ : Any = intermediate_size A_ : int = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : List[str] = initializer_range A_ : Optional[Any] = layer_norm_eps A_ : List[str] = image_size A_ : str = patch_size A_ : int = num_channels A_ : Optional[int] = qkv_bias A_ : List[Any] = num_detection_tokens A_ : Tuple = use_mid_position_embeddings A_ : int = auxiliary_loss # Hungarian matcher A_ : int = class_cost A_ : List[Any] = bbox_cost A_ : Optional[int] = giou_cost # Loss coefficients A_ : Any = bbox_loss_coefficient A_ : List[Any] = giou_loss_coefficient A_ : str = eos_coefficient class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4 @property def _a (self ): return 12
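Instantiation sketch, assuming the class above corresponds to transformers' `YolosConfig`; the list-valued `image_size` and `num_detection_tokens` are the YOLOS-specific knobs, and the values shown restate the defaults.

from transformers import YolosConfig

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
print(config.num_detection_tokens, config.auxiliary_loss)  # 100 False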
'''simple docstring'''
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
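A perfect number equals the sum of its proper divisors, which makes a handy sanity check: 28 = 1 + 2 + 4 + 7 + 14.

print(sum_of_proper_divisors(28))  # 28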
'''simple docstring''' from jiwer import compute_measures import datasets lowerCamelCase :int = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCamelCase :int = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' lowerCamelCase :Optional[Any] = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def _a (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def _a (self , lowercase=None , lowercase=None , lowercase=False ): if concatenate_texts: return compute_measures(lowercase , lowercase )["wer"] else: A_ : List[Any] = 0 A_ : Optional[int] = 0 for prediction, reference in zip(lowercase , lowercase ): A_ : Any = compute_measures(lowercase , lowercase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
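Worked check of the docstring example: the first pair has 1 substitution over 4 reference words, and the second aligns with 2 substitutions plus 1 insertion over 4 words, so WER = (1 + 3) / 8 = 0.5. jiwer's one-call helper returns the same number.

from jiwer import wer

refs = ["this is the reference", "there is another one"]
preds = ["this is the prediction", "there is an other sample"]
print(wer(refs, preds))  # 0.5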
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCamelCase :Dict = 2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCamelCase :Dict = 5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCamelCase :str = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = len([g for position, g in enumerate(lowerCamelCase__ ) if g == main_target[position]] ) return (item, float(lowerCamelCase__ )) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = random.randint(0 , len(lowerCamelCase__ ) - 1 ) A_ : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] A_ : str = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : int = list(lowerCamelCase__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: A_ : Optional[int] = random.choice(lowerCamelCase__ ) return "".join(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): '''simple docstring''' A_ : str = [] # Generate more children proportionally to the fitness score. A_ : List[str] = int(parent_a[1] * 1_00 ) + 1 A_ : Any = 10 if child_n >= 10 else child_n for _ in range(lowerCamelCase__ ): A_ : Tuple = population_score[random.randint(0 , lowerCamelCase__ )][0] A_, A_ : List[Any] = crossover(parent_a[0] , lowerCamelCase__ ) # Append new string to the population list. pop.append(mutate(lowerCamelCase__ , lowerCamelCase__ ) ) pop.append(mutate(lowerCamelCase__ , lowerCamelCase__ ) ) return pop def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: A_ : Tuple = f'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(lowerCamelCase__ ) # Verify that the target contains no genes besides the ones inside genes variable. A_ : List[str] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: A_ : Optional[Any] = f'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(lowerCamelCase__ ) # Generate random starting population. A_ : Union[str, Any] = [] for _ in range(lowerCamelCase__ ): population.append("""""".join([random.choice(lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) )] ) ) # Just some logs to know what the algorithms is doing. A_, A_ : Tuple = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(lowerCamelCase__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. 
A_ : Optional[Any] = [evaluate(lowerCamelCase__ , lowerCamelCase__ ) for item in population] # Check if there is a matching evolution. A_ : int = sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : x[1] , reverse=lowerCamelCase__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'\nGeneration: {generation}' f'\nTotal Population:{total_population}' f'\nBest score: {population_score[0][1]}' f'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. A_ : List[Any] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(lowerCamelCase__ ) # Normalize population score to be between 0 and 1. A_ : Optional[int] = [ (item, score / len(lowerCamelCase__ )) for item, score in population_score ] # This is selection for i in range(lowerCamelCase__ ): population.extend(select(population_score[int(lowerCamelCase__ )] , lowerCamelCase__ , lowerCamelCase__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(lowerCamelCase__ ) > N_POPULATION: break if __name__ == "__main__": lowerCamelCase :Tuple = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCamelCase :List[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCamelCase , lowerCamelCase , lowerCamelCase :Tuple = basic(target_str, genes_list) print( F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" )
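Standalone restatement of the single-point crossover used above; the obfuscated parameter naming in the file collapses the two parents, so the names here are illustrative.

import random


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Swap tails at a random cut point, yielding two children
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])


random.seed(0)
print(crossover("HELLO", "WORLD"))  # two tail-swapped children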
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline __SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } __SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'} __SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _a (self ): torch.manual_seed(0 ) A_ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) A_ : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) A_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) A_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A_ : int = CLIPTextModel(lowercase ) A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A_ : Any = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a (self , lowercase , lowercase=0 ): A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : int = image / 2 + 0.5 if str(lowercase ).startswith("""mps""" ): A_ : int = torch.manual_seed(lowercase ) else: A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : Union[str, Any] = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _a (self ): A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : Optional[Any] = self.get_dummy_components() A_ : Any 
= CycleDiffusionPipeline(**lowercase ) A_ : int = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : int = self.get_dummy_inputs(lowercase ) A_ : str = pipe(**lowercase ) A_ : str = output.images A_ : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _a (self ): A_ : Dict = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase , """half""" ): A_ : List[str] = module.half() A_ : List[Any] = CycleDiffusionPipeline(**lowercase ) A_ : Optional[Any] = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Any = self.get_dummy_inputs(lowercase ) A_ : Tuple = pipe(**lowercase ) A_ : List[str] = output.images A_ : Union[str, Any] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _a (self ): return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def _a (self ): return super().test_inference_batch_single_identical() @skip_mps def _a (self ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def _a (self ): return super().test_save_load_optional_components() @skip_mps def _a (self ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self ): A_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) A_ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) A_ : List[str] = init_image.resize((512, 512) ) A_ : Dict = """CompVis/stable-diffusion-v1-4""" A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : Any = CycleDiffusionPipeline.from_pretrained( lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : str = """A black colored car""" A_ : Dict = """A blue colored car""" A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Optional[int] = pipe( prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , ) A_ : str = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def _a (self ): A_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) A_ : Optional[int] = init_image.resize((512, 512) ) A_ : Optional[int] = 
"""CompVis/stable-diffusion-v1-4""" A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : Optional[Any] = """A black colored car""" A_ : int = """A blue colored car""" A_ : str = torch.manual_seed(0 ) A_ : Any = pipe( prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , ) A_ : int = output.images assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowerCamelCase :List[Any] = ['''gpt2'''] lowerCamelCase :Any = '''gpt2''' if is_tf_available(): class _lowerCAmelCase ( tf.Module ): def __init__(self , lowercase ): super().__init__() A_ : Optional[Any] = tokenizer A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) A_ : Union[str, Any] = TFGPTaLMHeadModel.from_config(lowercase ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) ) def _a (self , lowercase ): A_ : List[Any] = self.tokenizer(lowercase ) A_ : Tuple = tokenized["""input_ids"""].to_tensor() A_ : Union[str, Any] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) A_ : Optional[Any] = self.model(input_ids=lowercase , attention_mask=lowercase )["""logits"""] return outputs @require_tf @require_keras_nlp class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): super().setUp() A_ : Union[str, Any] = [GPTaTokenizer.from_pretrained(lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] A_ : int = [TFGPTaTokenizer.from_pretrained(lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) A_ : Union[str, Any] = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] A_ : List[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def _a (self ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: A_ : Union[str, Any] = tokenizer([test_inputs] , return_tensors="""tf""" ) A_ : str = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors A_ : Any = python_outputs[key].numpy() A_ : Optional[int] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(lowercase , tf.intaa ) == tf_outputs_values ) ) @slow def _a (self ): for tf_tokenizer in self.tf_tokenizers: A_ : Any = tf.function(lowercase ) for test_inputs in self.test_sentences: A_ : int = tf.constant(lowercase ) A_ : List[Any] = compiled_tokenizer(lowercase ) A_ : str = tf_tokenizer(lowercase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _a (self ): for tf_tokenizer in self.tf_tokenizers: A_ : List[Any] = ModelToSave(tokenizer=lowercase ) A_ : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) A_ : Optional[Any] = model.serving(lowercase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: A_ : Any = Path(lowercase ) / """saved.model""" tf.saved_model.save(lowercase , lowercase , signatures={"""serving_default""": model.serving} ) A_ : Dict = 
tf.saved_model.load(lowercase ) A_ : str = loaded_model.signatures["""serving_default"""](lowercase )["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def _a (self ): for tf_tokenizer in self.tf_tokenizers: A_ : Any = tf.convert_to_tensor([self.test_sentences[0]] ) A_ : Optional[int] = tf_tokenizer(lowercase ) # Build model with some sample inputs A_ : List[Any] = tf_tokenizer.get_config() A_ : Dict = TFGPTaTokenizer.from_config(lowercase ) A_ : Optional[int] = model_from_config(lowercase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def _a (self ): for tf_tokenizer in self.tf_tokenizers: # for the test to run A_ : Any = 123123 for max_length in [3, 5, 1024]: A_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) A_ : str = tf_tokenizer(lowercase , max_length=lowercase ) A_ : Optional[int] = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
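The point of the in-graph tokenizer exercised above is that the whole text-to-logits path can be traced and exported as one SavedModel, so serving needs no Python-side tokenization. A hedged sketch (requires keras-nlp; the top-level `TFGPT2Tokenizer` export is assumed):

import tensorflow as tf

from transformers import TFGPT2Tokenizer  # assumed export; needs keras-nlp installed

tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tokenizer(tf.constant(["hello world"]))
print(outputs["input_ids"].shape)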
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[str] = 'down' def _a (self ): A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'down' def _a (self ): A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' def _a (self ): A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' def _a (self ): A_, A_ : str = super().prepare_init_args_and_inputs_for_common() A_ : Optional[Any] = 32 return init_dict, inputs_dict def _a (self ): A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Union[str, Any] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _a (self ): A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' @property def _a (self ): return super().get_dummy_input(include_skip_sample=lowercase ) def _a (self ): A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Union[str, Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_skip_sample=lowercase ) def _a (self ): A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Any = 'down' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : int = { """in_channels""": 32, """out_channels""": 32, } A_ : Any = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, 
-0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : Optional[Any] = { """in_channels""": 32, """out_channels""": 32, } A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'mid' def _a (self ): A_ : Optional[Any] = { """in_channels""": 32, """temb_channels""": 128, } A_ : Any = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'mid' def _a (self ): A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common() A_ : List[str] = 32 return init_dict, inputs_dict def _a (self ): A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'mid' @property def _a (self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common() A_ : Optional[int] = 32 return init_dict, inputs_dict def _a (self ): A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : str = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Any = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Union[str, Any] = 32 return init_dict, inputs_dict def _a (self ): A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] 
super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : int = 32 return init_dict, inputs_dict def _a (self ): A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[str] = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _a (self ): A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : str = 'up' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32} A_ : Optional[int] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'up' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32} A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(lowercase )
686
1
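# The block tests above all share one regression pattern: run a block on a
# fixed input and compare a short slice of the flattened output against
# hardcoded reference values. A minimal self-contained sketch of that pattern;
# the Conv2d layer and the reference slice are stand-ins, not actual diffusers
# blocks or their expected values.
import torch


def check_output_slice(module, sample, expected_slice, atol=5e-3):
    # Run the module deterministically and compare the trailing values of the
    # flattened output against the hardcoded reference slice.
    with torch.no_grad():
        output = module(sample)
    output_slice = output.flatten()[-expected_slice.numel():]
    return torch.allclose(output_slice, expected_slice, atol=atol)


if __name__ == "__main__":
    torch.manual_seed(0)
    layer = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
    x = torch.randn(1, 3, 8, 8)
    with torch.no_grad():
        reference = layer(x).flatten()[-9:]  # stands in for a hardcoded 9-value slice
    assert check_output_slice(layer, x, reference)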
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCamelCase :Optional[Any] = logging.get_logger(__name__) lowerCamelCase :List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase :Dict = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } lowerCamelCase :List[Any] = { '''roberta-base''': 5_1_2, '''roberta-large''': 5_1_2, '''roberta-large-mnli''': 5_1_2, '''distilroberta-base''': 5_1_2, '''roberta-base-openai-detector''': 5_1_2, '''roberta-large-openai-detector''': 5_1_2, } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Optional[Any] = ['input_ids', 'attention_mask'] __SCREAMING_SNAKE_CASE : Tuple = RobertaTokenizer def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ): super().__init__( lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , 
cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , ) A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space: A_ : Tuple = getattr(lowercase , pre_tok_state.pop("""type""" ) ) A_ : str = add_prefix_space A_ : Optional[Any] = pre_tok_class(**lowercase ) A_ : Union[str, Any] = add_prefix_space A_ : Optional[Any] = """post_processor""" A_ : List[str] = getattr(self.backend_tokenizer , lowercase , lowercase ) if tokenizer_component_instance: A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Any = tuple(state["""sep"""] ) if "cls" in state: A_ : Union[str, Any] = tuple(state["""cls"""] ) A_ : Tuple = False if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space: A_ : Optional[Any] = add_prefix_space A_ : List[Any] = True if state.get("""trim_offsets""" , lowercase ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Tuple = True if changes_to_apply: A_ : List[str] = getattr(lowercase , state.pop("""type""" ) ) A_ : Dict = component_class(**lowercase ) setattr(self.backend_tokenizer , lowercase , lowercase ) @property def _a (self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _a (self , lowercase ): A_ : Union[str, Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value A_ : Union[str, Any] = value def _a (self , *lowercase , **lowercase ): A_ : List[Any] = kwargs.get("""is_split_into_words""" , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): A_ : int = kwargs.get("""is_split_into_words""" , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowercase , **lowercase ) def _a (self , lowercase , lowercase = None ): A_ : Optional[int] = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase ) def _a (self , lowercase , lowercase=None ): A_ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a (self , lowercase , lowercase = None ): A_ : Dict = [self.sep_token_id] A_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
686
'''simple docstring'''
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordering of words from `word_bank` that concatenates to `target`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # add the word to every combination the current position holds
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # now, push those combinations to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order, so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
686
1
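# Hedged usage sketch for the fast RoBERTa tokenizer defined above. Assumes the
# `transformers` package is installed, PyTorch is available for return_tensors,
# and the "roberta-base" files can be fetched from the Hub.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
encoding = tokenizer("Hello world", return_tensors="pt")
print(encoding["input_ids"], encoding["attention_mask"])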
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Optional[Any] = logging.get_logger(__name__) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , ) A_ : Optional[int] = DetaConfig( backbone_config=lowerCamelCase__ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=lowerCamelCase__ , with_box_refine=lowerCamelCase__ , two_stage=lowerCamelCase__ , ) # set labels A_ : Tuple = """huggingface/label-files""" if "o365" in model_name: A_ : Tuple = 3_66 A_ : Union[str, Any] = """object365-id2label.json""" else: A_ : str = 91 A_ : int = """coco-detection-id2label.json""" A_ : List[Any] = num_labels A_ : Tuple = json.load(open(cached_download(hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) ) , """r""" ) ) A_ : Union[str, Any] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} A_ : Tuple = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) 
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") ) rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") ) rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") ) rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") ) rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") ) rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', 
f'model.encoder.layers.{i}.fc2.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') ) # fmt: on return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = dct.pop(lowerCamelCase__ ) A_ : List[Any] = val def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A_ : Optional[int] = 
num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A_ : Dict = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' ) A_ : Optional[int] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A_ : Optional[int] = in_proj_weight[:dim, :] A_ : Dict = in_proj_bias[: dim] A_ : Tuple = in_proj_weight[ dim : dim * 2, : ] A_ : Optional[int] = in_proj_bias[ dim : dim * 2 ] A_ : str = in_proj_weight[ -dim :, : ] A_ : Any = in_proj_bias[-dim :] # fmt: on def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention A_ : Dict = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Union[str, Any] = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : Union[str, Any] = in_proj_weight[:hidden_size, :] A_ : int = in_proj_bias[:hidden_size] A_ : List[str] = in_proj_weight[ hidden_size : hidden_size * 2, : ] A_ : Any = in_proj_bias[hidden_size : hidden_size * 2] A_ : Optional[int] = in_proj_weight[-hidden_size:, :] A_ : List[str] = in_proj_bias[-hidden_size:] def a ( ): '''simple docstring''' A_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = get_deta_config(lowerCamelCase__ ) # load original state dict if model_name == "deta-swin-large": A_ : Dict = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" ) elif model_name == "deta-swin-large-o365": A_ : Optional[int] = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" ) else: raise ValueError(f'Model name {model_name} not supported' ) A_ : List[str] = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""] # original state dict for name, param in state_dict.items(): print(lowerCamelCase__ , param.shape ) # rename keys A_ : Any = create_rename_keys(lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_swin_q_k_v(lowerCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: A_ : Optional[Any] = state_dict.pop(lowerCamelCase__ ) A_ : Tuple = val if "input_proj" in key: A_ : Optional[int] = state_dict.pop(lowerCamelCase__ ) A_ : Optional[int] = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: A_ : str = state_dict.pop(lowerCamelCase__ ) A_ : List[Any] = val # finally, create HuggingFace model and load state dict A_ : List[Any] = DetaForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() A_ : int = """cuda""" if torch.cuda.is_available() else """cpu""" model.to(lowerCamelCase__ ) # load image processor A_ : Optional[Any] = DetaImageProcessor(format="""coco_detection""" ) # verify our conversion on image 
A_ : Any = prepare_img() A_ : List[str] = processor(images=lowerCamelCase__ , return_tensors="""pt""" ) A_ : Dict = encoding["""pixel_values"""] A_ : Optional[int] = model(pixel_values.to(lowerCamelCase__ ) ) # verify logits print("""Logits:""" , outputs.logits[0, :3, :3] ) print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": A_ : Optional[int] = torch.tensor( [[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]] ) A_ : List[str] = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]] ) elif model_name == "deta-swin-large-o365": A_ : Optional[int] = torch.tensor( [[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]] ) A_ : int = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowerCamelCase__ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowerCamelCase__ ) , atol=1E-4 ) print("""Everything ok!""" ) if pytorch_dump_folder_path: # Save model and processor logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) # Push to hub if push_to_hub: print("""Pushing model and processor to hub...""" ) model.push_to_hub(f'jozhang97/{model_name}' ) processor.push_to_hub(f'jozhang97/{model_name}' ) if __name__ == "__main__": lowerCamelCase :List[Any] = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', '''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase :List[str] = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
686
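# Standalone sketch of the rename_key helper used by the DETA conversion script
# above: pop the tensor under its old name and reinsert it under the new one.
# Plain-dict version, no torch required; the key pair is one taken from the
# rename table above.
state_dict = {"transformer.encoder.layers.0.linear1.weight": "W"}
rename_keys = [
    ("transformer.encoder.layers.0.linear1.weight", "model.encoder.layers.0.fc1.weight"),
]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)
print(state_dict)  # {'model.encoder.layers.0.fc1.weight': 'W'}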
'''simple docstring'''


def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack: list[str] = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (
            len(stack) == 0 or open_to_closed[stack.pop()] != char
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
686
1
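# Self-contained sketch of the fused-QKV split performed by read_in_swin_q_k_v
# and read_in_decoder_q_k_v in the conversion script above: one in_proj weight
# of shape (3 * dim, dim) is cut into query/key/value thirds (bias likewise).
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)
query = in_proj_weight[:dim, :]
key = in_proj_weight[dim : dim * 2, :]
value = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)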
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class _lowerCAmelCase ( unittest.TestCase ): def _a (self , lowercase , lowercase ): return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy' def _a (self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _a (self , lowercase=0 , lowercase=(4, 4, 64, 64) , lowercase=False ): A_ : Dict = jnp.bfloataa if fpaa else jnp.floataa A_ : str = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase ) return image def _a (self , lowercase=False , lowercase="CompVis/stable-diffusion-v1-4" ): A_ : Any = jnp.bfloataa if fpaa else jnp.floataa A_ : str = """bf16""" if fpaa else None A_, A_ : List[Any] = FlaxUNetaDConditionModel.from_pretrained( lowercase , subfolder="""unet""" , dtype=lowercase , revision=lowercase ) return model, params def _a (self , lowercase=0 , lowercase=(4, 77, 768) , lowercase=False ): A_ : str = jnp.bfloataa if fpaa else jnp.floataa A_ : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]], [17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]], [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]], [3, 1000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]], # fmt: on ] ) def _a (self , lowercase , lowercase , lowercase ): A_, A_ : Any = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=lowercase ) A_ : Optional[Any] = self.get_latents(lowercase , fpaa=lowercase ) A_ : Optional[int] = self.get_encoder_hidden_states(lowercase , fpaa=lowercase ) A_ : int = model.apply( {"""params""": params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample assert sample.shape == latents.shape A_ : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) A_ : Optional[Any] = jnp.array(lowercase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]], [17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]], [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]], [3, 1000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]], # fmt: on ] ) def _a (self , lowercase , lowercase , lowercase ): A_, A_ : Optional[int] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=lowercase ) A_ : Dict = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase ) A_ : List[str] = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1024) , fpaa=lowercase ) A_ : Optional[int] = model.apply( {"""params""": params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample assert sample.shape == 
latents.shape A_ : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) A_ : List[str] = jnp.array(lowercase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
686
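# The Flax UNet tests above flatten an output slice and compare it to reference
# values with jnp.allclose at atol=1e-2, a tolerance loose enough for bfloat16.
# Standalone sketch (requires jax); the values simulate bf16 rounding error.
import jax.numpy as jnp

reference = jnp.asarray([0.1514, 0.0807, 0.1624, 0.1016], dtype=jnp.float32)
computed = reference.astype(jnp.bfloat16).astype(jnp.float32)  # bf16 round-trip
assert jnp.allclose(computed, reference, atol=1e-2)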
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ): super().__init__( lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , ) A_ : Optional[int] = field A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths} A_ : Optional[Any] = Json( cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , ) def _a (self ): # Build iterable dataset if self.streaming: A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ : int = None A_ : Union[str, Any] = None A_ : int = None A_ : List[str] = None self.builder.download_and_prepare( download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , ) A_ : str = self.builder.as_dataset( split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory ) return dataset class _lowerCAmelCase : def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ): if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' ) A_ : Any = dataset A_ : List[str] = path_or_buf A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A_ : Optional[Any] = num_proc A_ : List[Any] = """utf-8""" A_ : int = to_json_kwargs def _a (self ): A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase ) A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" ) A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'`datasets` currently does not support {compression} compression' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer: A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'The compression parameter is not supported when writing to a buffer, but compression={compression}' """ was passed. 
Please provide a local path instead.""" ) A_ : Union[str, Any] = self._write( file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs ) return written def _a (self , lowercase ): A_, A_, A_, A_, A_ : List[str] = args A_ : List[str] = query_table( table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , ) A_ : Any = batch.to_pandas().to_json( path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ): A_ : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(lowercase ) else: A_, A_ : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(lowercase ) return written
686
1
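# Usage sketch for the JSON reader/writer classes above (requires `datasets`).
# Dataset.to_json forwards orient/lines keyword arguments to a writer like the
# one shown above; the output file name here is arbitrary.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("out.jsonl", lines=True)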
'''simple docstring''' import os import sys import unittest lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Tuple = get_test_to_tester_mapping(lowercase ) A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase ) A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""} A_ : Union[str, Any] = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) def _a (self ): A_ : Optional[Any] = get_model_to_test_mapping(lowercase ) A_ : List[str] = get_model_to_test_mapping(lowercase ) A_ : Dict = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } A_ : Any = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) def _a (self ): A_ : List[Any] = get_model_to_tester_mapping(lowercase ) A_ : Optional[int] = get_model_to_tester_mapping(lowercase ) A_ : Dict = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } A_ : Dict = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
686
1
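# Minimal sketch of the dictionary shape the tests above assert on: inverting a
# test -> models relation into the model -> tests mappings listed in the
# expected values (toy data, not the real extraction done by get_test_info).
test_to_models = {"BertModelTest": ["BertModel", "BertForMaskedLM"]}
model_to_tests = {}
for test, models in test_to_models.items():
    for model in models:
        model_to_tests.setdefault(model, []).append(test)
print(model_to_tests)  # {'BertModel': ['BertModelTest'], 'BertForMaskedLM': ['BertModelTest']}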
'''simple docstring''' import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : int = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a (self , lowercase=0 ): A_ : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase ) ) A_ : Tuple = np.random.RandomState(lowercase ) A_ : Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """strength""": 0.75, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a (self ): A_ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowercase ) A_ : List[str] = self.get_dummy_inputs() A_ : Union[str, Any] = pipe(**lowercase ).images A_ : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A_ : Dict = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _a (self ): A_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) A_ : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : List[Any] = self.get_dummy_inputs() A_ : List[Any] = pipe(**lowercase ).images A_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Tuple = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a (self ): A_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) A_ : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) # warmup pass to apply optimizations A_ : List[str] = pipe(**self.get_dummy_inputs() ) A_ : List[str] = self.get_dummy_inputs() A_ : Optional[int] = pipe(**lowercase ).images A_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : List[str] = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a (self ): A_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) A_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) A_ : List[Any] = self.get_dummy_inputs() A_ : Optional[int] = pipe(**lowercase ).images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Tuple = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a (self ): A_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) A_ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Optional[int] = self.get_dummy_inputs() A_ : str = pipe(**lowercase ).images A_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Dict = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _a (self ): A_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) A_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Optional[int] = self.get_dummy_inputs() A_ : Tuple = pipe(**lowercase ).images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Optional[int] = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): @property def _a (self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a (self ): A_ : Optional[int] = ort.SessionOptions() A_ : Any = False return options def _a (self ): A_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) A_ : int = init_image.resize((768, 512) ) # using the PNDM scheduler by default A_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Any = """A fantasy landscape, trending on artstation""" A_ : List[str] = np.random.RandomState(0 ) A_ : Dict = pipe( prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase , output_type="""np""" , ) A_ : Dict = output.images A_ : Union[str, Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A_ : Optional[int] = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _a (self ): A_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) A_ : Optional[Any] = init_image.resize((768, 512) ) A_ : str = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) A_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) 
pipe.set_progress_bar_config(disable=lowercase ) A_ : str = """A fantasy landscape, trending on artstation""" A_ : Tuple = np.random.RandomState(0 ) A_ : Any = pipe( prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase , output_type="""np""" , ) A_ : Any = output.images A_ : List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A_ : Union[str, Any] = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
686
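# Hedged sketch of the scheduler swap the ONNX pipeline tests above exercise.
# Requires `diffusers` and onnxruntime; the checkpoint and provider follow the
# test file, and the assignment API may differ across diffusers versions.
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)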
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
686
1
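# The LongT5 __init__ above registers a _LazyModule so the heavy torch/flax
# imports run only on first attribute access. Minimal standalone sketch of the
# idea; this toy class is illustrative, not the transformers _LazyModule API.
import importlib


class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # deferred import
        return getattr(self._module, attr)


json_lazy = LazyModule("json")
print(json_lazy.dumps({"ok": True}))  # the real import happens on this first use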
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase :Any = logging.get_logger(__name__) # General docstring lowerCamelCase :Optional[int] = '''RegNetConfig''' # Base docstring lowerCamelCase :str = '''facebook/regnet-y-040''' lowerCamelCase :int = [1, 1_0_8_8, 7, 7] # Image classification docstring lowerCamelCase :Union[str, Any] = '''facebook/regnet-y-040''' lowerCamelCase :Any = '''tabby, tabby cat''' lowerCamelCase :Optional[int] = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : Optional[Any] = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding="""VALID""" , groups=lowercase , use_bias=lowercase , name="""convolution""" , ) A_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def _a (self , lowercase ): A_ : Union[str, Any] = self.convolution(self.padding(lowercase ) ) A_ : Any = self.normalization(lowercase ) A_ : Tuple = self.activation(lowercase ) return hidden_state class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , **lowercase ): super().__init__(**lowercase ) A_ : List[Any] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , ) def _a (self , lowercase ): A_ : Union[str, Any] = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[Any] = self.embedder(lowercase ) return hidden_state class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase = 2 , **lowercase ): super().__init__(**lowercase ) A_ : Tuple = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name="""convolution""" ) A_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" ) def _a (self , lowercase , lowercase = False ): return self.normalization(self.convolution(lowercase ) , training=lowercase ) class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase , **lowercase ): super().__init__(**lowercase ) A_ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name="""pooler""" ) A_ : List[str] = [ tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ), ] def _a (self , lowercase ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] A_ : Dict = self.pooler(lowercase ) for layer_module in self.attention: A_ : str = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): super().__init__(**lowercase ) A_ : List[Any] = in_channels != out_channels or stride != 1 A_ : Union[str, Any] = max(1 , out_channels // config.groups_width ) A_ : Tuple = ( TFRegNetShortCut(lowercase , stride=lowercase , name="""shortcut""" ) if should_apply_shortcut else tf.keras.layers.Activation("""linear""" , name="""shortcut""" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Tuple = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name="""layer.1""" ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name="""layer.2""" ), ] A_ : str = ACTaFN[config.hidden_act] def _a (self , lowercase ): A_ : Any = hidden_state for layer_module in self.layers: A_ : Dict = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : Any = self.activation(lowercase ) return hidden_state class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): super().__init__(**lowercase ) A_ : int = in_channels != out_channels or stride != 1 A_ : Union[str, Any] = max(1 , out_channels // config.groups_width ) A_ : int = ( TFRegNetShortCut(lowercase , stride=lowercase , name="""shortcut""" ) if should_apply_shortcut else tf.keras.layers.Activation("""linear""" , name="""shortcut""" ) ) A_ : Optional[Any] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name="""layer.1""" ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name="""layer.3""" ), ] A_ : List[str] = ACTaFN[config.hidden_act] def _a (self , lowercase ): A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : Optional[int] = layer_module(lowercase ) A_ : Optional[int] = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): super().__init__(**lowercase ) A_ : Dict = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer A_ : Any = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name="""layers.0""" ), *[layer(lowercase , lowercase , lowercase , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def _a (self , lowercase ): for layer_module in self.layers: A_ : Any = layer_module(lowercase ) return hidden_state class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , **lowercase ): super().__init__(**lowercase ) A_ : Optional[Any] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) ) A_ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'stages.{i+1}' ) ) def _a (self , lowercase , lowercase = False , lowercase = True ): A_ : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : int = hidden_states + (hidden_state,) A_ : List[str] = stage_module(lowercase ) if output_hidden_states: A_ : int = hidden_states + (hidden_state,) if not return_dict: 
return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) @keras_serializable class _lowerCAmelCase ( tf.keras.layers.Layer ): __SCREAMING_SNAKE_CASE : Optional[int] = RegNetConfig def __init__(self , lowercase , **lowercase ): super().__init__(**lowercase ) A_ : Tuple = config A_ : Optional[Any] = TFRegNetEmbeddings(lowercase , name="""embedder""" ) A_ : Tuple = TFRegNetEncoder(lowercase , name="""encoder""" ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name="""pooler""" ) @unpack_inputs def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): A_ : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict A_ : Optional[int] = self.embedder(lowercase , training=lowercase ) A_ : List[str] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Tuple = encoder_outputs[0] A_ : Dict = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : List[str] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : Union[str, Any] = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = RegNetConfig __SCREAMING_SNAKE_CASE : str = 'regnet' __SCREAMING_SNAKE_CASE : Any = 'pixel_values' @property def _a (self ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCamelCase :List[Any] = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase :Any = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, __UpperCAmelCase , ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , *lowercase , **lowercase ): super().__init__(lowercase , *lowercase , **lowercase ) A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name="""regnet""" ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): A_ : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict A_ : Optional[Any] = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __UpperCAmelCase , ) class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ): def __init__(self , lowercase , *lowercase , **lowercase ): super().__init__(lowercase , *lowercase , **lowercase ) A_ : Optional[Any] = config.num_labels A_ : List[Any] = TFRegNetMainLayer(lowercase , name="""regnet""" ) # classification head A_ : Any = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _a (self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): A_ : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : str = return_dict if return_dict is not None else self.config.use_return_dict A_ : Optional[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : List[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : str = self.classifier[0](lowercase ) A_ : Dict = self.classifier[1](lowercase ) A_ : Any = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
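# --- Editor's usage sketch (added; not part of the original module). A minimal
# inference pass through the TF RegNet classes above, assuming the public
# "facebook/regnet-y-040" checkpoint is downloadable. The model expects NCHW
# pixel values and transposes to NHWC internally, as the embedder code shows.
import tensorflow as tf
from transformers import TFRegNetForImageClassification

model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
pixel_values = tf.random.uniform((1, 3, 224, 224))  # one dummy NCHW image
outputs = model(pixel_values)
print(outputs.logits.shape)  # (1, 1000): ImageNet-1k classification head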
'''Check that custom kernel/extension files are present in a built release.'''
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
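# Editor's note (added): this checker is typically run in two modes; the exact
# CI wiring is an assumption, but the flag matches the argparse definition above:
#   python check_build.py              # checks build/lib/transformers (build output)
#   python check_build.py --check_lib  # checks the installed `transformers` package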
'''Chinese Remainder Theorem and modular-inverse helpers.'''
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Returns (x, y) such that a * x + b * y == gcd(a, b).
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    # Solves x ≡ r1 (mod n1) and x ≡ r2 (mod n2) for coprime n1, n2.
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    # Returns the multiplicative inverse of a modulo n.
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    # Same result as above, built on invert_modulo instead of extended_euclid.
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
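# --- Editor's example (added): a concrete CRT instance for the helpers above.
# x ≡ 1 (mod 5) and x ≡ 3 (mod 7) has the unique solution 31 in [0, 35).
if __name__ == "__main__":
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31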
'''Project Euler 191: count attendance "prize strings" over a school term.'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a prize string is forfeited by being late three days in a row
    # or by a second absence
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
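# --- Editor's check (added): for a 4-day term the recurrence above yields 43
# valid prize strings, the figure quoted in Project Euler problem 191.
if __name__ == "__main__":
    assert solution(4) == 43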
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase :Any = logging.get_logger(__name__) lowerCamelCase :List[Any] = Dict[str, Any] lowerCamelCase :Tuple = List[Prediction] @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , *lowercase , **lowercase ): super().__init__(*lowercase , **lowercase ) if self.framework == "tf": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _a (self , **lowercase ): A_ : List[str] = {} if "threshold" in kwargs: A_ : Optional[int] = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__(self , *lowercase , **lowercase ): return super().__call__(*lowercase , **lowercase ) def _a (self , lowercase ): A_ : Tuple = load_image(lowercase ) A_ : Tuple = torch.IntTensor([[image.height, image.width]] ) A_ : Optional[Any] = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: A_ : List[Any] = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) A_ : int = target_size return inputs def _a (self , lowercase ): A_ : Union[str, Any] = model_inputs.pop("""target_size""" ) A_ : Any = self.model(**lowercase ) A_ : List[str] = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: A_ : Union[str, Any] = model_inputs["""bbox"""] return model_outputs def _a (self , lowercase , lowercase=0.9 ): A_ : Dict = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A_, A_ : Optional[int] = target_size[0].tolist() def unnormalize(lowercase ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A_, A_ : Dict = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A_ : int = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A_ : List[Any] = [unnormalize(lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )] A_ : List[str] = ["""score""", """label""", """box"""] A_ : Optional[int] = [dict(zip(lowercase , lowercase ) ) for vals in zip(scores.tolist() , lowercase , lowercase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A_ : List[Any] = self.image_processor.post_process_object_detection(lowercase , lowercase , lowercase ) A_ : Union[str, Any] = raw_annotations[0] A_ : List[str] = raw_annotation["""scores"""] A_ : Any = raw_annotation["""labels"""] A_ : Dict = raw_annotation["""boxes"""] A_ : int = scores.tolist() A_ : Optional[int] = [self.model.config.idalabel[label.item()] for label in labels] A_ : Optional[Any] = [self._get_bounding_box(lowercase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A_ : Tuple = ["""score""", """label""", """box"""] A_ : Optional[Any] = [ dict(zip(lowercase , lowercase ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def _a (self , lowercase ): if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) A_, A_, A_, A_ : Union[str, Any] = box.int().tolist() A_ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
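# --- Editor's usage sketch (added): driving the pipeline class above through
# the high-level factory. The model name and image URL are illustrative and
# require network access; `threshold` maps to the postprocess kwarg defined above.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
)
print(predictions)  # [{"score": ..., "label": ..., "box": {...}}, ...]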
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Tuple = 'linear' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial' __SCREAMING_SNAKE_CASE : Optional[int] = 'constant' __SCREAMING_SNAKE_CASE : str = 'constant_with_warmup' __SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant' def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) ) return 1.0 return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' A_ : Optional[Any] = {} A_ : Optional[Any] = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A_, A_ : Union[str, Any] = rule_str.split(""":""" ) A_ : Union[str, Any] = int(lowerCamelCase__ ) A_ : List[Any] = float(lowerCamelCase__ ) A_ : Union[str, Any] = value A_ : Optional[int] = float(rule_list[-1] ) def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ): def rule_func(lowerCamelCase__ ) -> float: A_ : str = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowerCamelCase__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) 
% 1.0) )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ): '''simple docstring''' A_ : Optional[Any] = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' ) def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A_ : str = lr_init - lr_end A_ : Tuple = num_training_steps - num_warmup_steps A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps A_ : Optional[int] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase :List[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ): '''simple docstring''' A_ : Optional[Any] = SchedulerType(lowerCamelCase__ ) A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
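# --- Editor's sketch (added): this scheduler factory corresponds to
# `diffusers.optimization.get_scheduler` in the released library; a minimal
# wiring with a toy optimizer (all hyperparameters here are illustrative):
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler(
    "cosine", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100
)
for _ in range(100):
    optimizer.step()     # toy loop: no loss/backward, just LR bookkeeping
    lr_scheduler.step()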
'''Feature extractor for DPT (deprecated alias of DPTImageProcessor).'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''') lowerCamelCase :int = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowerCamelCase :List[str] = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowerCamelCase :Union[str, Any] = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowerCamelCase :Dict = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowerCamelCase :int = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowerCamelCase :str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowerCamelCase :List[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowerCamelCase :Tuple = [] lowerCamelCase :Dict = [] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for attribute in key.split(""".""" ): A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Any = 
hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Dict = value elif weight_type == "bias": A_ : Dict = value elif weight_type == "running_mean": A_ : Optional[Any] = value elif weight_type == "running_var": A_ : int = value elif weight_type == "num_batches_tracked": A_ : Optional[Any] = value elif weight_type == "weight_ih_l0": A_ : Optional[int] = value elif weight_type == "weight_hh_l0": A_ : Union[str, Any] = value elif weight_type == "bias_ih_l0": A_ : Optional[int] = value elif weight_type == "bias_hh_l0": A_ : Tuple = value elif weight_type == "weight_ih_l1": A_ : Optional[int] = value elif weight_type == "weight_hh_l1": A_ : Dict = value elif weight_type == "bias_ih_l1": A_ : Optional[int] = value elif weight_type == "bias_hh_l1": A_ : Tuple = value else: A_ : Any = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_, A_ : List[str] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": A_ : List[str] = MAPPING_24K elif model_name == "encodec_48khz": A_ : str = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f'{name} was ignored' ) continue A_ : str = False for key, mapped_key in MAPPING.items(): if "*" in key: A_, A_ : List[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: A_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue A_ : Union[str, Any] = True if "*" in mapped_key: A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: A_ : Any = """weight_g""" elif "weight_v" in name: A_ : Tuple = """weight_v""" elif "weight_ih_l0" in name: A_ : Union[str, Any] = """weight_ih_l0""" elif "weight_hh_l0" in name: A_ : Tuple = """weight_hh_l0""" elif "bias_ih_l0" in name: A_ : str = """bias_ih_l0""" elif "bias_hh_l0" in name: A_ : List[Any] = """bias_hh_l0""" elif "weight_ih_l1" in name: A_ : Dict = """weight_ih_l1""" elif "weight_hh_l1" in name: A_ : Any = """weight_hh_l1""" elif "bias_ih_l1" in name: A_ : Optional[int] = """bias_ih_l1""" elif "bias_hh_l1" in name: A_ : List[Any] = """bias_hh_l1""" elif "bias" in name: A_ : List[str] = """bias""" elif "weight" in name: A_ : Optional[int] = """weight""" elif "running_mean" in name: A_ : Union[str, Any] = """running_mean""" elif "running_var" in name: A_ : Optional[int] = """running_var""" elif "num_batches_tracked" in name: A_ : List[Any] = """num_batches_tracked""" else: A_ : str = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue 
if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if config_path is not None: A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Optional[int] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A_ : Dict = [8, 5, 4, 4] A_ : Optional[Any] = [2.2] A_ : Tuple = 64 A_ : Tuple = 3_20_00 A_ : List[Any] = 20_48 A_ : Optional[Any] = False A_ : str = False A_ : Optional[int] = False elif model_name == "encodec_48khz": A_ : Dict = [8, 5, 4, 2] A_ : Tuple = [3.0, 6.0, 12.0, 24.0] A_ : List[Any] = 4_80_00 A_ : Dict = 2 A_ : Dict = False A_ : Dict = """time_group_norm""" A_ : Optional[Any] = True A_ : str = 1.0 A_ : Any = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : Dict = EncodecModel(lowerCamelCase__ ) A_ : Any = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase__ ) A_ : int = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A_ : Tuple = original_checkpoint["""best_state"""] recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCamelCase :Dict = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
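# --- Editor's note (added): a typical invocation of this conversion script.
# The script filename and local paths are assumptions; the flags match the
# argparse definitions above.
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-converted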
'''Import structure for the CLAP model (lazy module pattern).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
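# --- Editor's usage sketch (added): with the lazy import structure above,
# top-level names resolve on first attribute access. The checkpoint id is an
# assumption; loading it needs torch plus network access.
#   from transformers import ClapModel, ClapProcessor
#   model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")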
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :Any = logging.get_logger(__name__) lowerCamelCase :Any = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = 'beit' def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ): super().__init__(**lowercase ) A_ : Union[str, Any] = vocab_size A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Tuple = num_attention_heads A_ : List[Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : str = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Dict = initializer_range A_ : str = layer_norm_eps A_ : Any = image_size A_ : int = patch_size A_ : List[str] = num_channels A_ : Any = use_mask_token A_ : Dict = use_absolute_position_embeddings A_ : List[Any] = use_relative_position_bias A_ : Tuple = use_shared_relative_position_bias A_ : Optional[int] = layer_scale_init_value A_ : Tuple = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : Optional[int] = use_auxiliary_head A_ : Union[str, Any] = auxiliary_loss_weight A_ : Tuple = auxiliary_channels A_ : List[Any] = auxiliary_num_convs A_ : Dict = auxiliary_concat_input A_ : Optional[Any] = semantic_loss_ignore_index class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
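# --- Editor's usage sketch (added): instantiating a randomly initialized model
# from the configuration above (the PyTorch `BeitModel` class is assumed to be
# importable alongside this config; no checkpoint download is needed).
from transformers import BeitConfig, BeitModel

config = BeitConfig(image_size=224, patch_size=16, num_hidden_layers=4)
model = BeitModel(config)  # random weights
print(config.hidden_size, model.config.num_hidden_layers)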
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = s.rsplit(lowerCamelCase__ , lowerCamelCase__ ) return new.join(lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : str = {} A_ : Optional[Any] = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: A_ : List[Any] = key.replace(f'{group_key}.' , f'{group_key}.group.' ) if "res_path" in key: A_ : List[str] = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): A_ : Any = rreplace(lowerCamelCase__ , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): A_ : List[Any] = rreplace(lowerCamelCase__ , """.b""" , """.bias""" , 1 ) A_ : Optional[Any] = value.float() return upgrade @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=True ): '''simple docstring''' from dall_e import Encoder A_ : List[str] = Encoder() if os.path.exists(lowerCamelCase__ ): A_ : List[Any] = torch.load(lowerCamelCase__ ) else: A_ : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : str = ckpt.state_dict() encoder.load_state_dict(lowerCamelCase__ ) if config_path is not None: A_ : Dict = FlavaImageCodebookConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Optional[Any] = FlavaImageCodebookConfig() A_ : List[str] = FlavaImageCodebook(lowerCamelCase__ ).eval() A_ : Tuple = encoder.state_dict() A_ : Optional[int] = upgrade_state_dict(lowerCamelCase__ ) hf_model.load_state_dict(lowerCamelCase__ ) A_ : List[Any] = hf_model.state_dict() A_ : Optional[int] = count_parameters(lowerCamelCase__ ) A_ : Tuple = count_parameters(lowerCamelCase__ ) assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(lowerCamelCase__ ) else: return hf_state_dict if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCamelCase :Union[str, Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
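# --- Editor's note (added): example invocation. The checkpoint URL points at
# the publicly released DALL-E encoder weights this converter targets; the
# script filename and output path are assumptions.
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-codebook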
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel lowerCamelCase :Optional[int] = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 1_3_1_0_7_2, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, } def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2 def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2 A_ : List[Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ ) class _lowerCAmelCase ( __UpperCAmelCase ): pass class _lowerCAmelCase ( nn.Module ): def __init__(self , lowercase ): super().__init__() A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 ) A_ : str = deepcopy(self.diffusion ) A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = MODELS_MAP[model_name]["""url"""] os.system(f'wget {url} ./' ) return f'./{model_name}.ckpt' lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } lowerCamelCase :str = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } lowerCamelCase :int = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } lowerCamelCase :List[Any] = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } lowerCamelCase :Optional[Any] = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def a ( lowerCamelCase__ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""" , 
RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'ResConvBlock error with {name}' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def a ( lowerCamelCase__ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ): return name.replace(lowerCamelCase__ , lowerCamelCase__ ) elif name.startswith(lowerCamelCase__ ): return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value] raise ValueError(f'Attn error with {name}' ) def a ( lowerCamelCase__ , lowerCamelCase__=13 ): '''simple docstring''' A_ : Union[str, Any] = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) A_ : Dict = 0 if string.startswith("""net.3.""" ): depth += 1 A_ : int = string[6:] elif string.startswith("""net.""" ): A_ : Tuple = string[4:] while string.startswith("""main.7.""" ): depth += 1 A_ : Dict = string[7:] if string.startswith("""main.""" ): A_ : Union[str, Any] = string[5:] # mid block if string[:2].isdigit(): A_ : Optional[Any] = string[:2] A_ : Optional[Any] = string[2:] else: A_ : List[Any] = string[0] A_ : Dict = string[1:] if depth == max_depth: A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num] A_ : Optional[Any] = """mid_block""" elif depth > 0 and int(lowerCamelCase__ ) < 7: A_ : Any = DOWN_NUM_TO_LAYER[layer_num] A_ : Union[str, Any] = f'down_blocks.{depth}' elif depth > 0 and int(lowerCamelCase__ ) > 7: A_ : List[str] = UP_NUM_TO_LAYER[layer_num] A_ : List[str] = f'up_blocks.{max_depth - depth - 1}' elif depth == 0: A_ : str = DEPTH_0_TO_LAYER[layer_num] A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' 
) A_ : Optional[int] = string_left[1:] if "resnets" in new_layer: A_ : Tuple = convert_resconv_naming(lowerCamelCase__ ) elif "attentions" in new_layer: A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ ) A_ : Dict = new_string_left if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left else: A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue A_ : List[Any] = rename(lowerCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: A_ : int = v return new_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if len(lowerCamelCase__ ) == 1: if len(v.shape ) == 3: # weight A_ : Optional[Any] = v[:, :, 0] else: # bias A_ : Union[str, Any] = v else: # qkv matrices A_ : Optional[int] = v.shape[0] A_ : str = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0] else: A_ : str = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}' A_ : int = download(lowerCamelCase__ ) A_ : Any = MODELS_MAP[model_name]["""sample_rate"""] A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""] A_ : Tuple = Object() A_ : Union[str, Any] = sample_size A_ : Tuple = sample_rate A_ : int = 0 A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ ) A_ : Optional[Any] = diffusers_model.state_dict() A_ : Dict = DiffusionUncond(lowerCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] ) A_ : Any = orig_model.diffusion_ema.eval() A_ : Any = orig_model.state_dict() A_ : List[str] = rename_orig_weights(lowerCamelCase__ ) A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}' assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}' if key == "time_proj.weight": A_ : str = value.squeeze() A_ : Union[str, Any] = value diffusers_model.load_state_dict(lowerCamelCase__ ) A_ : Optional[Any] = 1_00 A_ : Union[str, Any] = 33 A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ ) A_ : List[str] = torch.manual_seed(lowerCamelCase__ ) A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ ) A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1] A_ : List[Any] = get_crash_schedule(lowerCamelCase__ ) A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) A_ : str = torch.manual_seed(33 ) A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} ) A_ : str = generated.clamp(-1 , 1 ) A_ : List[Any] = (generated - audio).abs().sum() A_ : int = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , lowerCamelCase__ ) print("""Diff max""" , lowerCamelCase__ ) assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/' print(f'Conversion for {model_name} successful!' ) if __name__ == "__main__": lowerCamelCase :int = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCamelCase :List[str] = parser.parse_args() main(args)
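# --- Editor's note (added): example invocation. Passing one of the official
# model names from MODELS_MAP above makes the script download the checkpoint
# itself; the output path is an assumption.
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers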
'''Bead sort (gravity sort) for sequences of non-negative integers.'''


def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
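# --- Editor's example (added): each pass lets "beads" fall across one pair of
# rods, so this list simulation is O(n^2) in the sequence length.
if __name__ == "__main__":
    print(bead_sort([4, 0, 3, 2]))  # [0, 2, 3, 4]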
'''Binomial distribution: probability of k successes in n Bernoulli trials.'''
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
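# --- Editor's check (added): worked arithmetic behind the demo above:
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
if __name__ == "__main__":
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12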
686
1
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values lowerCamelCase :int = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') lowerCamelCase , lowerCamelCase :Optional[int] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') lowerCamelCase :Any = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: lowerCamelCase :List[str] = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) lowerCamelCase :str = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"]) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
686
'''simple docstring''' import re def a ( lowerCamelCase__ ): '''simple docstring''' if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
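The translation-table mechanics used above, shown in isolation: str.maketrans builds a one-to-one character map and translate applies it, while the regex guard rejects anything outside the A/T/C/G alphabet beforehand.

table = str.maketrans("ATCG", "TAGC")
assert "GTAT".translate(table) == "CATA"  # each base maps to its complement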
686
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore lowerCamelCase :int = ''' Human: <<task>> Assistant: ''' lowerCamelCase :Union[str, Any] = '''huggingface-tools/default-prompts''' lowerCamelCase :List[Any] = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''} def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="run" ): '''simple docstring''' if prompt_or_repo_id is None: A_ : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("""\\s""" , lowerCamelCase__ ) is not None: return prompt_or_repo_id A_ : str = cached_file( lowerCamelCase__ , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} ) with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f: return f.read()
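The whitespace heuristic above on its own: any string containing whitespace is treated as a literal prompt, while a space-free string is resolved as a Hub repo id. A minimal sketch (the helper's original name is download_prompt; this dump renames definitions):

import re

def looks_like_prompt(text: str) -> bool:
    # Mirrors the re.search("\\s", ...) check above.
    return re.search(r"\s", text) is not None

assert looks_like_prompt("Answer the question: <<task>>")
assert not looks_like_prompt("huggingface-tools/default-prompts")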
686
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowerCamelCase__ ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowerCamelCase__ ): http_head("""https://huggingface.co""" )
686
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : str = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[str] = False @property def _a (self ): return 32 @property def _a (self ): return 32 @property def _a (self ): return self.time_input_dim @property def _a (self ): return self.time_input_dim * 4 @property def _a (self ): return 100 @property def _a (self ): torch.manual_seed(0 ) A_ : int = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Union[str, Any] = UNetaDConditionModel(**lowercase ) return model @property def _a (self ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a (self ): torch.manual_seed(0 ) A_ : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def _a (self ): A_ : Union[str, Any] = self.dummy_unet A_ : Union[str, Any] = self.dummy_movq A_ : Optional[int] = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } A_ : List[str] = DDIMScheduler(**lowercase ) A_ : Dict = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a (self , lowercase , lowercase=0 ): A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : int = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowercase ) # create init_image A_ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Optional[Any] = Image.fromarray(np.uinta(lowercase ) ).convert("""RGB""" ).resize((256, 256) ) # create hint A_ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase ) if str(lowercase ).startswith("""mps""" ): A_ : Dict = torch.manual_seed(lowercase ) else: A_ : Any = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : Any = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def _a (self ): A_ : Tuple = """cpu""" A_ : Dict = self.get_dummy_components() A_ : List[Any] = self.pipeline_class(**lowercase ) A_ : Any = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Dict = pipe(**self.get_dummy_inputs(lowercase ) ) A_ : Tuple = output.images A_ : int = pipe( **self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0] A_ : List[Any] = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Any = np.array( [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self ): A_ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" ) A_ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) A_ : Optional[int] = init_image.resize((512, 512) ) A_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : int = torch.from_numpy(np.array(lowercase ) ).float() / 2_55.0 A_ : List[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) A_ : Optional[int] = """A robot, 4k photo""" A_ : Union[str, Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(lowercase ) A_ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) A_ : str = pipeline.to(lowercase ) pipeline.set_progress_bar_config(disable=lowercase ) A_ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 ) A_, A_ : List[str] = pipe_prior( lowercase , image=lowercase , strength=0.85 , generator=lowercase , negative_prompt="""""" , ).to_tuple() A_ : Union[str, Any] = pipeline( image=lowercase , image_embeds=lowercase , 
negative_image_embeds=lowercase , hint=lowercase , generator=lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , ) A_ : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(lowercase , lowercase )
686
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCamelCase :Any = re.compile(R'''\s+''') def a ( lowerCamelCase__ ): '''simple docstring''' return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def a ( lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""] A_ : Optional[int] = example["""content"""].splitlines() for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ): '''simple docstring''' A_ : Any = ["""unit tests""", """test file""", """configuration file"""] A_ : List[str] = example["""content"""].splitlines() A_ : str = 0 A_ : Union[str, Any] = 0 # first test for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test A_ : List[Any] = example["""content"""].count("""\n""" ) A_ : Any = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = ["""def """, """class """, """for """, """while """] A_ : Optional[int] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def a ( lowerCamelCase__ , lowerCamelCase__=4 ): '''simple docstring''' A_ : Tuple = example["""content"""].splitlines() A_ : int = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""] A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ ) return {"ratio": ratio} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = {} results.update(get_hash(lowerCamelCase__ ) ) results.update(line_stats(lowerCamelCase__ ) ) results.update(alpha_stats(lowerCamelCase__ ) ) results.update(char_token_ratio(lowerCamelCase__ ) ) results.update(is_autogenerated(lowerCamelCase__ ) ) results.update(is_config_or_test(lowerCamelCase__ ) ) 
results.update(has_no_keywords(lowerCamelCase__ ) ) results.update(has_few_assignments(lowerCamelCase__ ) ) return results def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def a ( lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """rb""" ) as f_in: with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ ) os.unlink(lowerCamelCase__ ) # Settings lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments) lowerCamelCase :Tuple = parser.parse_args() if args.num_workers is None: lowerCamelCase :Tuple = multiprocessing.cpu_count() lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCamelCase :List[Any] = time.time() lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCamelCase :int = time.time() lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCamelCase :int = set(ds.unique('''hash''')) lowerCamelCase :List[str] = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCamelCase :Dict = time.time() lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCamelCase :List[str] = time.time() lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCamelCase :int = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCamelCase :Tuple = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCamelCase :Tuple = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json") lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
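The exact-duplicate detection at the top of this script hashes content after stripping all whitespace, so reformatting alone does not defeat it. A small self-check of that behavior (content_hash is a hypothetical name for the get_hash normalization above):

import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # Same normalization as get_hash above: drop all whitespace, then md5.
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

assert content_hash("def f():\n    return 1") == content_hash("def f(): return 1")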
686
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) lowerCamelCase :Any = { '''MIT/ast-finetuned-audioset-10-10-0.4593''': ( '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = 'audio-spectrogram-transformer' def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=1024 , lowercase=128 , **lowercase , ): super().__init__(**lowercase ) A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : int = num_attention_heads A_ : List[Any] = intermediate_size A_ : Union[str, Any] = hidden_act A_ : Tuple = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : List[str] = initializer_range A_ : Tuple = layer_norm_eps A_ : List[Any] = patch_size A_ : Optional[Any] = qkv_bias A_ : Tuple = frequency_stride A_ : str = time_stride A_ : Tuple = max_length A_ : List[str] = num_mel_bins
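A hedged usage sketch of this config class under its original transformers names (the dump mangles class names); the keyword values mirror the defaults defined above:

from transformers import ASTConfig, ASTModel

# 16x16 patches over a 1024-frame, 128-mel-bin spectrogram, as in the defaults above.
config = ASTConfig(patch_size=16, max_length=1024, num_mel_bins=128)
model = ASTModel(config)
print(config.hidden_size, config.num_hidden_layers)  # 768 12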
686
'''simple docstring''' import pytest lowerCamelCase :Optional[Any] = '''__dummy_dataset1__''' lowerCamelCase :List[Any] = ''' import json import os import datasets REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", ] ) ), "langs": datasets.Sequence(datasets.Value("string")), "spans": datasets.Sequence(datasets.Value("string")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), ] def _generate_examples(self, filepath): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, json.loads(line) ''' @pytest.fixture def a ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def a ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = dataset_loading_script_name A_ : int = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase__ ) A_ : Tuple = script_dir / f'{script_name}.py' with open(lowerCamelCase__ , """w""" ) as f: f.write(lowerCamelCase__ ) return str(lowerCamelCase__ )
686
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=__UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ['transformers', 'torch', 'note_seq'] def __init__(self , *lowercase , **lowercase ): requires_backends(self , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def _a (cls , *lowercase , **lowercase ): requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def _a (cls , *lowercase , **lowercase ): requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
686
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split lowerCamelCase :int = datasets.load_iris() lowerCamelCase :str = np.array(data['''data''']) lowerCamelCase :Dict = np.array(data['''target''']) lowerCamelCase :Union[str, Any] = data['''target_names'''] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ ) # List of distances of all points from the point to be classified A_ : List[str] = [] for data_point in data: A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
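The distance computation above, worked on one pair of 4-feature points; the query is the one from the demo call and the second point is illustrative. euclidean_distance(...) is exactly the L2 norm of the difference:

import numpy as np

query = np.array([4.4, 3.1, 1.3, 1.4])   # the demo query above
sample = np.array([5.1, 3.5, 1.4, 0.2])  # illustrative iris-like training point
print(round(float(np.linalg.norm(query - sample)), 3))  # 1.449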
686
1
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if gpta_config_file == "": A_ : Optional[int] = GPTaConfig() else: A_ : Optional[Any] = GPTaConfig.from_json_file(lowerCamelCase__ ) A_ : List[str] = GPTaModel(lowerCamelCase__ ) # Load weights from numpy load_tf_weights_in_gpta(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model A_ : int = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME A_ : Union[str, Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , lowerCamelCase__ ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCamelCase :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) lowerCamelCase :Optional[Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
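When --gpt2_config_file is empty, the script above falls back to a default GPT2Config; those defaults correspond to the 124M-parameter GPT-2. A quick check under the original class name:

from transformers import GPT2Config

config = GPT2Config()  # the fallback used above when no JSON config is given
print(config.n_layer, config.n_head, config.n_embd)  # 12 12 768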
686
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): def _a (self , lowercase ): if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()] return labels def __call__(self , lowercase , lowercase , lowercase ): if len(lowercase ) == 0 or len(lowercase ) == 0: raise ValueError("""You must include at least one label and at least one sequence.""" ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """ """Make sure the passed template includes formatting syntax such as {{}} where the label should go.""" ).format(lowercase ) ) if isinstance(lowercase , lowercase ): A_ : Tuple = [sequences] A_ : int = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ): A_ : int = args_parser super().__init__(*lowercase , **lowercase ) if self.entailment_id == -1: logger.warning( """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """ """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" ) @property def _a (self ): for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("""entail""" ): return ind return -1 def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ): A_ : Any = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """ """ `pad_token=eos_token`""" ) A_ : str = self.tokenizer.eos_token try: A_ : str = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , ) except Exception as e: if "too short" in str(lowercase ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. A_ : Any = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def _a (self , **lowercase ): if kwargs.get("""multi_class""" , lowercase ) is not None: A_ : Tuple = kwargs["""multi_class"""] logger.warning( """The `multi_class` argument has been deprecated and renamed to `multi_label`. 
""" """`multi_class` will be removed in a future version of Transformers.""" ) A_ : Optional[Any] = {} if "candidate_labels" in kwargs: A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] ) if "hypothesis_template" in kwargs: A_ : List[str] = kwargs["""hypothesis_template"""] A_ : List[Any] = {} if "multi_label" in kwargs: A_ : Optional[Any] = kwargs["""multi_label"""] return preprocess_params, {}, postprocess_params def __call__(self , lowercase , *lowercase , **lowercase , ): if len(lowercase ) == 0: pass elif len(lowercase ) == 1 and "candidate_labels" not in kwargs: A_ : Union[str, Any] = args[0] else: raise ValueError(F'Unable to understand extra arguments {args}' ) return super().__call__(lowercase , **lowercase ) def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ): A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ): A_ : List[Any] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowercase ) - 1, **model_input, } def _a (self , lowercase ): A_ : Optional[Any] = inputs["""candidate_label"""] A_ : List[Any] = inputs["""sequence"""] A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names} A_ : List[str] = self.model(**lowercase ) A_ : str = { """candidate_label""": candidate_label, """sequence""": sequence, """is_last""": inputs["""is_last"""], **outputs, } return model_outputs def _a (self , lowercase , lowercase=False ): A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs] A_ : str = [outputs["""sequence"""] for outputs in model_outputs] A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] ) A_ : Dict = logits.shape[0] A_ : Any = len(lowercase ) A_ : List[str] = N // n A_ : Tuple = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowercase ) == 1: # softmax over the entailment vs. contradiction dim for each label independently A_ : Union[str, Any] = self.entailment_id A_ : Any = -1 if entailment_id == 0 else 0 A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]] A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Optional[Any] = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels A_ : Optional[int] = reshaped_outputs[..., self.entailment_id] A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Any = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
686
1
'''simple docstring''' def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError("""String lengths must match!""" ) A_ : List[str] = 0 for char_a, char_b in zip(lowerCamelCase__ , lowerCamelCase__ ): if char_a != char_b: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
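A worked example on the classic pair "karolin" / "kathrin": three positions differ, so the Hamming distance is 3. A one-liner equivalent of the loop above:

# k a r o l i n
# k a t h r i n
#     ^ ^ ^        -> 3 mismatching positions
assert sum(c1 != c2 for c1, c2 in zip("karolin", "kathrin")) == 3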
686
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :int = logging.get_logger(__name__) lowerCamelCase :Tuple = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'yolos' def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ): super().__init__(**lowercase ) A_ : List[Any] = hidden_size A_ : Dict = num_hidden_layers A_ : Any = num_attention_heads A_ : Any = intermediate_size A_ : int = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : List[str] = initializer_range A_ : Optional[Any] = layer_norm_eps A_ : List[str] = image_size A_ : str = patch_size A_ : int = num_channels A_ : Optional[int] = qkv_bias A_ : List[Any] = num_detection_tokens A_ : Tuple = use_mid_position_embeddings A_ : int = auxiliary_loss # Hungarian matcher A_ : int = class_cost A_ : List[Any] = bbox_cost A_ : Optional[int] = giou_cost # Loss coefficients A_ : Any = bbox_loss_coefficient A_ : List[Any] = giou_loss_coefficient A_ : str = eos_coefficient class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4 @property def _a (self ): return 12
686
1
'''simple docstring''' from math import sqrt def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ( number >= 0 ), "'number' must been an int and positive" A_ : int = True # 0 and 1 are none primes. if number <= 1: A_ : Any = False for divisor in range(2 , int(round(sqrt(lowerCamelCase__ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: A_ : str = False break # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'status' must been from type bool" return status def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N A_ : List[str] = list(range(2 , n + 1 ) ) A_ : List[str] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCamelCase__ ) ): for j in range(i + 1 , len(lowerCamelCase__ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): A_ : Tuple = 0 # filters actual prime numbers. A_ : Optional[Any] = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'ans' must been from type list" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (n > 2), "'N' must been an int and > 2" A_ : int = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowerCamelCase__ ): ans.append(lowerCamelCase__ ) # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'ans' must been from type list" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and number >= 0, "'number' must been an int and >= 0" A_ : Union[str, Any] = [] # this list will be returns of the function. # potential prime number factors. 
A_ : List[Any] = 2 A_ : int = number if number == 0 or number == 1: ans.append(lowerCamelCase__ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCamelCase__ ): while quotient != 1: if is_prime(lowerCamelCase__ ) and (quotient % factor == 0): ans.append(lowerCamelCase__ ) quotient /= factor else: factor += 1 else: ans.append(lowerCamelCase__ ) # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'ans' must been from type list" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" A_ : int = 0 # prime factorization of 'number' A_ : Dict = prime_factorization(lowerCamelCase__ ) A_ : List[Any] = max(lowerCamelCase__ ) # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'ans' must been from type int" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" A_ : Any = 0 # prime factorization of 'number' A_ : Optional[Any] = prime_factorization(lowerCamelCase__ ) A_ : Optional[int] = min(lowerCamelCase__ ) # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'ans' must been from type int" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowerCamelCase__ ), "compare bust been from type bool" return number % 2 == 0 def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowerCamelCase__ ), "compare bust been from type bool" return number % 2 != 0 def a ( lowerCamelCase__ ): '''simple docstring''' assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (number > 2) and is_even(lowerCamelCase__ ) ), "'number' must been an int, even and > 2" A_ : Tuple = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' A_ : Any = get_prime_numbers(lowerCamelCase__ ) A_ : Union[str, Any] = len(lowerCamelCase__ ) # run variable for while-loops. A_ : str = 0 A_ : Optional[int] = None # exit variable. for break up the loops A_ : List[str] = True while i < len_pn and loop: A_ : Optional[Any] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: A_ : Any = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (len(lowerCamelCase__ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
A_ : Any = 0 while numbera != 0: A_ : Optional[int] = numbera % numbera A_ : Tuple = numbera A_ : Optional[Any] = rest # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." A_ : Dict = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' A_ : Tuple = prime_factorization(lowerCamelCase__ ) A_ : str = prime_factorization(lowerCamelCase__ ) elif numbera == 1 or numbera == 1: A_ : Dict = [] A_ : Tuple = [] A_ : Optional[Any] = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Any = 0 A_ : Optional[Any] = 0 A_ : int = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: A_ : Dict = prime_fac_a.count(lowerCamelCase__ ) A_ : int = prime_fac_a.count(lowerCamelCase__ ) for _ in range(max(lowerCamelCase__ , lowerCamelCase__ ) ): ans *= n else: A_ : List[Any] = prime_fac_a.count(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ): ans *= n done.append(lowerCamelCase__ ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: A_ : int = prime_fac_a.count(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ): ans *= n done.append(lowerCamelCase__ ) # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (n >= 0), "'number' must been a positive int" A_ : Any = 0 A_ : Optional[Any] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCamelCase__ ): ans += 1 # precondition assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and is_prime( lowerCamelCase__ ), "'ans' must been a prime number and from type int" return ans def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' assert ( is_prime(lowerCamelCase__ ) and is_prime(lowerCamelCase__ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" A_ : Tuple = p_number_a + 1 # jump to the next number A_ : Optional[int] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCamelCase__ ): number += 1 while number < p_number_a: ans.append(lowerCamelCase__ ) number += 1 # fetch the next prime number. while not is_prime(lowerCamelCase__ ): number += 1 # precondition assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ans[0] != p_number_a and ans[len(lowerCamelCase__ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (n >= 1), "'n' must been int and >= 1" A_ : Tuple = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowerCamelCase__ ) # precondition assert ans[0] == 1 and ans[len(lowerCamelCase__ ) - 1] == n, "Error in function getDivisiors(...)" return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and ( number > 1 ), "'number' must been an int and >= 1" A_ : Optional[int] = get_divisors(lowerCamelCase__ ) # precondition assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (divisors[0] == 1) and (divisors[len(lowerCamelCase__ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. A_ : Union[str, Any] = gcd(abs(lowerCamelCase__ ) , abs(lowerCamelCase__ ) ) # precondition assert ( isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (n >= 0), "'n' must been a int and >= 0" A_ : Dict = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def a ( lowerCamelCase__ ): '''simple docstring''' assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0" A_ : List[Any] = 0 A_ : Tuple = 1 A_ : int = 1 # this will be return for _ in range(n - 1 ): A_ : Tuple = ans ans += fiba A_ : List[str] = tmp return ans
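Two quick cross-checks of the number-theory helpers above, reimplemented compactly (the dump renames every definition to a; gcd and kgV are the original names used in the assertion messages):

def gcd(a: int, b: int) -> int:
    # Iterative Euclid, the same remainder loop as the helper above.
    while b:
        a, b = b, a % b
    return a

assert gcd(48, 36) == 12
# kgV (lcm) above builds the result from prime factorizations; the
# identity lcm(a, b) = a * b // gcd(a, b) gives the same answer:
assert 48 * 36 // gcd(48, 36) == 144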
686
'''simple docstring''' from jiwer import compute_measures import datasets lowerCamelCase :int = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCamelCase :int = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' lowerCamelCase :Optional[Any] = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def _a (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def _a (self , lowercase=None , lowercase=None , lowercase=False ): if concatenate_texts: return compute_measures(lowercase , lowercase )["wer"] else: A_ : List[Any] = 0 A_ : Optional[int] = 0 for prediction, reference in zip(lowercase , lowercase ): A_ : Any = compute_measures(lowercase , lowercase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
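A dependency-free sketch of the WER = (S + D + I) / N formula quoted in the docstring, via a word-level Levenshtein distance; jiwer performs the same alignment with extra bookkeeping for the individual S/D/I counts:

def wer(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edits needed to turn ref[:i] into hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,          # deletion
                           dp[i][j - 1] + 1,          # insertion
                           dp[i - 1][j - 1] + cost)   # substitution
    return dp[-1][-1] / len(ref)

# One substitution over four reference words: WER = 1 / 4
assert wer("this is the reference", "this is the prediction") == 0.25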
686
1
'''simple docstring''' def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Tuple = len(lowerCamelCase__ ) + 1 A_ : str = len(lowerCamelCase__ ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. A_ : Any = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )] # since string of zero length match pattern of zero length A_ : Optional[Any] = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , lowerCamelCase__ ): A_ : Union[str, Any] = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , lowerCamelCase__ ): A_ : int = dp[0][j - 2] if pattern[j - 1] == """*""" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , lowerCamelCase__ ): for j in range(1 , lowerCamelCase__ ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": A_ : Any = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: A_ : Any = 1 elif pattern[j - 2] in (input_string[i - 1], "."): A_ : Optional[Any] = dp[i - 1][j] else: A_ : Optional[int] = 0 else: A_ : Optional[Any] = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") lowerCamelCase :List[str] = '''aab''' lowerCamelCase :Dict = '''c*a*b''' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(F"{input_string} matches the given pattern {pattern}") else: print(F"{input_string} does not match with the given pattern {pattern}")
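A cross-check of the DP matcher's "." and "*" semantics against Python's own regex engine, using full-string matching as the DP table assumes (match_pattern is the original name of the definition above):

import re

cases = [("aab", "c*a*b", True), ("aaa", "ab*a*c*a", True), ("ab", ".*c", False)]
for string, pattern, want in cases:
    assert bool(re.fullmatch(pattern, string)) is want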
686
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline __SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } __SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'} __SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _a (self ): torch.manual_seed(0 ) A_ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) A_ : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) A_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) A_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A_ : int = CLIPTextModel(lowercase ) A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A_ : Any = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a (self , lowercase , lowercase=0 ): A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : int = image / 2 + 0.5 if str(lowercase ).startswith("""mps""" ): A_ : int = torch.manual_seed(lowercase ) else: A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : Union[str, Any] = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _a (self ): A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : Optional[Any] = self.get_dummy_components() A_ : Any 
= CycleDiffusionPipeline(**lowercase ) A_ : int = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : int = self.get_dummy_inputs(lowercase ) A_ : str = pipe(**lowercase ) A_ : str = output.images A_ : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _a (self ): A_ : Dict = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase , """half""" ): A_ : List[str] = module.half() A_ : List[Any] = CycleDiffusionPipeline(**lowercase ) A_ : Optional[Any] = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Any = self.get_dummy_inputs(lowercase ) A_ : Tuple = pipe(**lowercase ) A_ : List[str] = output.images A_ : Union[str, Any] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _a (self ): return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def _a (self ): return super().test_inference_batch_single_identical() @skip_mps def _a (self ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def _a (self ): return super().test_save_load_optional_components() @skip_mps def _a (self ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self ): A_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) A_ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) A_ : List[str] = init_image.resize((512, 512) ) A_ : Dict = """CompVis/stable-diffusion-v1-4""" A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : Any = CycleDiffusionPipeline.from_pretrained( lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : str = """A black colored car""" A_ : Dict = """A blue colored car""" A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Optional[int] = pipe( prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , ) A_ : str = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def _a (self ): A_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) A_ : Optional[int] = init_image.resize((512, 512) ) A_ : Optional[int] = 
"""CompVis/stable-diffusion-v1-4""" A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : Optional[Any] = """A black colored car""" A_ : int = """A blue colored car""" A_ : str = torch.manual_seed(0 ) A_ : Any = pipe( prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , ) A_ : int = output.images assert np.abs(image - expected_image ).max() < 2E-2
686
1
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = ['image_processor', 'tokenizer'] __SCREAMING_SNAKE_CASE : List[Any] = 'AutoImageProcessor' __SCREAMING_SNAKE_CASE : int = 'AutoTokenizer' def __init__(self , lowercase=None , lowercase=None , **lowercase ): A_ : List[str] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase , ) A_ : int = kwargs.pop("""feature_extractor""" ) A_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase , lowercase ) A_ : str = self.image_processor A_ : Tuple = False def __call__(self , *lowercase , **lowercase ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowercase , **lowercase ) A_ : Dict = kwargs.pop("""images""" , lowercase ) A_ : Any = kwargs.pop("""text""" , lowercase ) if len(lowercase ) > 0: A_ : Union[str, Any] = args[0] A_ : Tuple = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: A_ : Tuple = self.image_processor(lowercase , *lowercase , **lowercase ) if text is not None: A_ : int = self.tokenizer(lowercase , **lowercase ) if text is None: return inputs elif images is None: return encodings else: A_ : int = encodings["""input_ids"""] return inputs def _a (self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @contextmanager def _a (self ): warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) A_ : Optional[int] = True A_ : List[str] = self.tokenizer yield A_ : Any = self.image_processor A_ : Optional[Any] = False def _a (self , lowercase , lowercase=False , lowercase=None ): if added_vocab is None: A_ : int = self.tokenizer.get_added_vocab() A_ : List[Any] = {} while tokens: A_ : Optional[Any] = re.search(R"""<s_(.*?)>""" , lowercase , re.IGNORECASE ) if start_token is None: break A_ : Dict = start_token.group(1 ) A_ : str = re.search(RF'</s_{key}>' , lowercase , re.IGNORECASE ) A_ : Optional[Any] = start_token.group() if end_token is None: A_ : str = tokens.replace(lowercase , """""" ) else: A_ : Dict = end_token.group() A_ : str = re.escape(lowercase ) A_ : str = re.escape(lowercase ) A_ : Dict = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , lowercase , re.IGNORECASE ) if content is not None: A_ : Optional[Any] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node A_ : str = self.tokenajson(lowercase , is_inner_value=lowercase , added_vocab=lowercase ) if value: if len(lowercase ) == 1: A_ : List[str] = value[0] A_ : Tuple = value else: # leaf nodes A_ : Any = [] for leaf in content.split(R"""<sep/>""" ): A_ : Optional[Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": A_ : Optional[int] = leaf[1:-2] # for categorical special tokens output[key].append(lowercase ) if len(output[key] ) == 1: A_ : List[Any] = output[key][0] A_ : Optional[int] = tokens[tokens.find(lowercase ) + len(lowercase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowercase , added_vocab=lowercase ) if len(lowercase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _a (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , ) return self.image_processor_class @property def _a (self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , ) return self.image_processor
686
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[str] = 'down' def _a (self ): A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'down' def _a (self ): A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' def _a (self ): A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' def _a (self ): A_, A_ : str = super().prepare_init_args_and_inputs_for_common() A_ : Optional[Any] = 32 return init_dict, inputs_dict def _a (self ): A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Union[str, Any] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _a (self ): A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' @property def _a (self ): return super().get_dummy_input(include_skip_sample=lowercase ) def _a (self ): A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Union[str, Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_skip_sample=lowercase ) def _a (self ): A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Any = 'down' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : int = { """in_channels""": 32, """out_channels""": 32, } A_ : Any = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, 
-0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : Optional[Any] = { """in_channels""": 32, """out_channels""": 32, } A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'mid' def _a (self ): A_ : Optional[Any] = { """in_channels""": 32, """temb_channels""": 128, } A_ : Any = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'mid' def _a (self ): A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common() A_ : List[str] = 32 return init_dict, inputs_dict def _a (self ): A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'mid' @property def _a (self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common() A_ : Optional[int] = 32 return init_dict, inputs_dict def _a (self ): A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : str = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Any = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Union[str, Any] = 32 return init_dict, inputs_dict def _a (self ): A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] 
super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : int = 32 return init_dict, inputs_dict def _a (self ): A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[str] = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _a (self ): A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : str = 'up' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32} A_ : Optional[int] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'up' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32} A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(lowercase )
686
1
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptTokenizer __SCREAMING_SNAKE_CASE : Any = False def _a (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] A_ : Union[str, Any] = dict(zip(lowercase , range(len(lowercase ) ) ) ) A_ : List[str] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(lowercase ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(lowercase ) ) def _a (self , lowercase ): A_ : Dict = """lower newer""" A_ : str = """lower newer""" return input_text, output_text def _a (self ): A_ : str = BioGptTokenizer(self.vocab_file , self.merges_file ) A_ : List[str] = """lower""" A_ : str = ["""low""", """er</w>"""] A_ : Optional[int] = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : Optional[Any] = tokens + ["""<unk>"""] A_ : Tuple = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) @slow def _a (self ): A_ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A_ : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase ) A_ : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase ) A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase ) A_ : int = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
686
'''simple docstring'''
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # adds the word to every combination the current position holds
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
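A quick sanity check for the all_construct sample above; the expected output is my own hand count, not part of the original file. The only decompositions of "abc" over this bank are the whole word, "ab" + "c", and the three single letters:

# Hand-checked example; run in the same module as all_construct.
combos = all_construct("abc", ["a", "b", "c", "ab", "abc"])
assert sorted(combos) == sorted([["abc"], ["ab", "c"], ["a", "b", "c"]])
print(combos)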
686
1
'''simple docstring'''
import math


def is_prime(number):
    '''simple docstring'''
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
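A short usage sketch for the two helpers above. Note the quirk (visible in the code itself, not something I am adding) that next_prime on an input that is already prime recurses onto value + 1, so it returns the next prime strictly greater than a prime input:

print(is_prime(13))    # True
print(next_prime(14))  # 17: walks 14 -> 15 -> 16 -> 17
print(next_prime(13))  # 17 as well: 13 is prime, so the search restarts at 14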
686
'''simple docstring'''


def is_balanced(s):
    '''simple docstring'''
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main():
    '''simple docstring'''
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
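Quick non-interactive checks for is_balanced; the expected values are hand-derived and easy to confirm by running:

print(is_balanced("{[()]}"))  # True: every bracket closes in LIFO order
print(is_balanced("{[(])}"))  # False: "]" tries to close "(" out of order
print(is_balanced("(("))      # False: two brackets are left unclosed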
686
1
'''simple docstring'''


def odd_even_sort(input_list):
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
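The pass structure above is the classic odd-even (brick sort) transposition: compare-exchange all even-indexed pairs, then all odd-indexed pairs, until a full double pass makes no swap. A tiny non-interactive check (my addition, bypassing the input() demo):

print(odd_even_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]
print(odd_even_sort([]))               # []: the first pass finds nothing to swap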
686
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ): super().__init__( lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , ) A_ : Optional[int] = field A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths} A_ : Optional[Any] = Json( cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , ) def _a (self ): # Build iterable dataset if self.streaming: A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ : int = None A_ : Union[str, Any] = None A_ : int = None A_ : List[str] = None self.builder.download_and_prepare( download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , ) A_ : str = self.builder.as_dataset( split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory ) return dataset class _lowerCAmelCase : def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ): if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' ) A_ : Any = dataset A_ : List[str] = path_or_buf A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A_ : Optional[Any] = num_proc A_ : List[Any] = """utf-8""" A_ : int = to_json_kwargs def _a (self ): A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase ) A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" ) A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'`datasets` currently does not support {compression} compression' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer: A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'The compression parameter is not supported when writing to a buffer, but compression={compression}' """ was passed. 
Please provide a local path instead.""" ) A_ : Union[str, Any] = self._write( file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs ) return written def _a (self , lowercase ): A_, A_, A_, A_, A_ : List[str] = args A_ : List[str] = query_table( table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , ) A_ : Any = batch.to_pandas().to_json( path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ): A_ : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(lowercase ) else: A_, A_ : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(lowercase ) return written
686
1
'''simple docstring'''


def combination_sum_iv(n, array, target):
    '''simple docstring'''
    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
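All three implementations above count ordered compositions, so they must agree. For array [1, 2, 5] and target 5 a hand count gives 9: 1+1+1+1+1; 1+1+1+2 in four orders; 1+2+2 in three orders; and 5 alone:

for fn in (combination_sum_iv, combination_sum_iv_dp_array, combination_sum_iv_bottom_up):
    print(fn(3, [1, 2, 5], 5))  # 9 each time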
686
'''simple docstring''' import os import sys import unittest lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Tuple = get_test_to_tester_mapping(lowercase ) A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase ) A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""} A_ : Union[str, Any] = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) def _a (self ): A_ : Optional[Any] = get_model_to_test_mapping(lowercase ) A_ : List[str] = get_model_to_test_mapping(lowercase ) A_ : Dict = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } A_ : Any = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) def _a (self ): A_ : List[Any] = get_model_to_tester_mapping(lowercase ) A_ : Optional[int] = get_model_to_tester_mapping(lowercase ) A_ : Dict = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } A_ : Dict = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
686
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase :Any = { '''configuration_mobilenet_v2''': [ '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileNetV2Config''', '''MobileNetV2OnnxConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[Any] = ['''MobileNetV2FeatureExtractor'''] lowerCamelCase :Optional[Any] = ['''MobileNetV2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[Any] = [ '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileNetV2ForImageClassification''', '''MobileNetV2ForSemanticSegmentation''', '''MobileNetV2Model''', '''MobileNetV2PreTrainedModel''', '''load_tf_weights_in_mobilenet_v2''', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
686
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCamelCase :Any = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[Any] = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[Any] = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
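The two package __init__ files above both hand an _import_structure dict to transformers' _LazyModule so that heavy submodules are imported only when an attribute is first touched. As a rough standalone sketch of that pattern (my own minimal version, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Resolve exported names on first attribute access instead of at import time.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value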
686
1
'''simple docstring''' import os import jsonlines import numpy as np from tqdm import tqdm lowerCamelCase :List[str] = 2_0_4_8 lowerCamelCase :Union[str, Any] = 4_0_9_6 lowerCamelCase :List[str] = 4_2 lowerCamelCase :Any = os.environ.pop('''PROCESS_TRAIN''', '''false''') lowerCamelCase :List[Any] = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def a ( lowerCamelCase__ ): '''simple docstring''' def choose_first(lowerCamelCase__ , lowerCamelCase__=False ): assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) if len(lowerCamelCase__ ) == 1: A_ : int = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: A_ : int = {k: [a[k]] for k in a} if len(a["""start_token"""] ) > 0: break return a A_ : int = {"""id""": example["""id"""]} A_ : List[str] = example["""annotations"""] A_ : Any = annotation["""yes_no_answer"""] if 0 in yes_no_answer or 1 in yes_no_answer: A_ : Tuple = ["""yes"""] if 1 in yes_no_answer else ["""no"""] A_ : Dict = [] A_ : List[str] = [] A_ : Dict = ["""<cls>"""] else: A_ : Optional[Any] = ["""short"""] A_ : str = choose_first(annotation["""short_answers"""] ) if len(out["""start_token"""] ) == 0: # answer will be long if short is not available A_ : Optional[int] = ["""long"""] A_ : Dict = choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase__ ) A_ : List[Any] = [] answer.update(lowerCamelCase__ ) # disregard some samples if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]: A_ : List[str] = True else: A_ : Any = False A_ : List[str] = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""] if not all(isinstance(answer[k] , lowerCamelCase__ ) for k in cols ): raise ValueError("""Issue in ID""" , example["""id"""] ) return answer def a ( lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Tuple = _get_single_answer(lowerCamelCase__ ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element A_ : List[str] = example["""document"""]["""tokens"""] A_ : int = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) return { "context": " ".join(lowerCamelCase__ ), "answer": { "start_token": -1_00, # ignore index in cross-entropy "end_token": -1_00, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples A_ : Optional[Any] = ["""start_token""", """end_token"""] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 A_ : Optional[Any] = example["""document"""]["""tokens"""] A_ : Optional[int] = answer["""start_token"""] A_ : Optional[Any] = answer["""end_token"""] A_ : str = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 A_ : Tuple = """ """.join(context[start_token:end_token] ) # checking above code if assertion: A_ : Optional[int] = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]] A_ : Any = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]] A_ : int = """ """.join([old[i] for i in range(len(lowerCamelCase__ ) ) if not is_html[i]] ) if new != old: print("""ID:""" , example["""id"""] ) print("""New:""" , lowerCamelCase__ , end="""\n""" ) print("""Old:""" , lowerCamelCase__ , end="""\n\n""" ) return { "context": " ".join(lowerCamelCase__ ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=20_48 , lowerCamelCase__=40_96 , lowerCamelCase__=True ): '''simple docstring''' A_ : Dict = get_context_and_ans(lowerCamelCase__ , assertion=lowerCamelCase__ ) A_ : Tuple = out["""answer"""] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } A_ : Any = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids A_ : str = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element A_ : Tuple = [] A_ : str = [] A_ : Optional[int] = input_ids[:q_len] A_ : Optional[Any] = range(lowerCamelCase__ , len(lowerCamelCase__ ) , max_length - doc_stride ) for i in doc_start_indices: A_ : Dict = i + max_length - q_len A_ : List[Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer["""category"""][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_00] * len(lowerCamelCase__ ), "end_token": [-1_00] * len(lowerCamelCase__ ), "category": category, }, } A_ : Union[str, Any] = out["""context"""].split() A_ : Any = splitted_context[answer["""end_token"""]] A_ : List[str] = len( tokenizer( """ """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase__ , ).input_ids ) A_ : Optional[int] = len( tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase__ ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token A_ : Union[str, Any] = len(tokenizer(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 A_ : str = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive A_ : Dict = answer["""start_token"""] A_ : str = answer["""end_token"""] if assertion: A_ : Dict = tokenizer.decode(lowerCamelCase__ ) if answer["span"] != new: print("""ISSUE IN TOKENIZATION""" ) print("""OLD:""" , answer["""span"""] ) print("""NEW:""" , lowerCamelCase__ , end="""\n\n""" ) if len(lowerCamelCase__ ) <= max_length: return { "example_id": example["id"], "input_ids": 
[input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } A_ : Optional[Any] = input_ids[:q_len] A_ : Optional[int] = range(lowerCamelCase__ , len(lowerCamelCase__ ) , max_length - doc_stride ) A_ : Tuple = [] A_ : Dict = [] A_ : Tuple = [] A_ : Optional[int] = [] # null, yes, no, long, short for i in doc_start_indices: A_ : List[Any] = i + max_length - q_len A_ : Union[str, Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: A_ : Any = start_token - i + q_len A_ : Dict = end_token - i + q_len answers_category.append(answer["""category"""][0] ) # ["short"] -> "short" else: A_ : str = -1_00 A_ : Union[str, Any] = -1_00 answers_category.append("""null""" ) A_ : List[Any] = inputs[-1][start_token : end_token + 1] answers_start_token.append(lowerCamelCase__ ) answers_end_token.append(lowerCamelCase__ ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print("""ISSUE in strided for ID:""" , example["""id"""] ) print("""New:""" , tokenizer.decode(lowerCamelCase__ ) ) print("""Old:""" , tokenizer.decode(lowerCamelCase__ ) , end="""\n\n""" ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=20_48 , lowerCamelCase__=40_96 , lowerCamelCase__=False ): '''simple docstring''' A_ : List[Any] = get_strided_contexts_and_ans( lowerCamelCase__ , lowerCamelCase__ , doc_stride=lowerCamelCase__ , max_length=lowerCamelCase__ , assertion=lowerCamelCase__ , ) return example def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' with jsonlines.open(lowerCamelCase__ , """a""" ) as writer: for example in tqdm(lowerCamelCase__ , total=len(lowerCamelCase__ ) , desc="""Saving samples ... """ ): A_ : int = example["""labels"""] for ids, start, end, cat in zip( example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { """input_ids""": ids, """start_token""": start, """end_token""": end, """category""": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer lowerCamelCase :str = load_dataset('''natural_questions''') lowerCamelCase :Any = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') lowerCamelCase :Optional[int] = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] lowerCamelCase :Tuple = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } lowerCamelCase :Any = data.map(prepare_inputs, fn_kwargs=fn_kwargs) lowerCamelCase :Any = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) lowerCamelCase :List[str] = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
686
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
686
1
'''simple docstring'''


def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
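A hand check of the closed form above: 2 * a * ((a - 1) // 2) gives 6, 8, 20 for a = 3, 4, 5, so a small run is easy to verify:

print(solution(5))  # 6 + 8 + 20 = 34
print(solution())   # default n = 1000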
686
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days, absent, late):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
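A small check of the recurrence above: for a 4-day term it yields 43 prize strings, matching the worked example in Project Euler problem 191 (my attribution; the file itself does not name the problem, and its late/absent roles are mirrored relative to that statement, which leaves the count unchanged by symmetry):

print(solution(4))  # 43
print(solution())   # the 30-day count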
686
1
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowerCamelCase :Union[str, Any] = get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ): '''simple docstring''' os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) with FSDP.state_dict_type( lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A_ : List[Any] = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A_ : int = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' A_ : List[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if accelerator.process_index == 0: logger.info(f'Saving model to {output_model_file}' ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A_ : Dict = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A_ : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Saving model to {output_model_file}' ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A_ : int = os.path.join(lowerCamelCase__ , f'{MODEL_NAME}_{model_index}' ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) logger.info(f'Saving model to {ckpt_dir}' ) A_ : Dict = {"""model""": state_dict} dist_cp.save_state_dict( state_dict=lowerCamelCase__ , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase__ ) , planner=DefaultSavePlanner() , ) logger.info(f'Model saved to {ckpt_dir}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(lowerCamelCase__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """ """initializing FSDP object""" ) return A_ : Optional[Any] = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' A_ : Union[str, Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Loading model from {input_model_file}' ) A_ : Union[str, Any] = torch.load(lowerCamelCase__ ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A_ : Any = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else 
f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A_ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Loading model from {input_model_file}' ) A_ : Optional[int] = torch.load(lowerCamelCase__ ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A_ : str = ( os.path.join(lowerCamelCase__ , f'{MODEL_NAME}_{model_index}' ) if f'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(f'Loading model from {ckpt_dir}' ) A_ : Dict = {"""model""": model.state_dict()} dist_cp.load_state_dict( state_dict=lowerCamelCase__ , storage_reader=dist_cp.FileSystemReader(lowerCamelCase__ ) , planner=DefaultLoadPlanner() , ) A_ : Optional[Any] = state_dict["""model"""] logger.info(f'Model loaded from {ckpt_dir}' ) model.load_state_dict(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ): '''simple docstring''' os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) with FSDP.state_dict_type( lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A_ : Dict = FSDP.optim_state_dict(lowerCamelCase__ , lowerCamelCase__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A_ : Union[str, Any] = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A_ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Saving Optimizer state to {output_optimizer_file}' ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Optimizer state saved in {output_optimizer_file}' ) else: A_ : Union[str, Any] = os.path.join(lowerCamelCase__ , f'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) logger.info(f'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase__ ) , planner=DefaultSavePlanner() , ) logger.info(f'Optimizer state saved in {ckpt_dir}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A_ : str = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A_ : Dict = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A_ : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Loading Optimizer state from {input_optimizer_file}' ) A_ : str = torch.load(lowerCamelCase__ ) logger.info(f'Optimizer state loaded from {input_optimizer_file}' ) else: A_ : str = ( os.path.join(lowerCamelCase__ , f'{OPTIMIZER_NAME}_{optimizer_index}' ) if f'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(f'Loading Optimizer from {ckpt_dir}' ) A_ : Dict = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , 
storage_reader=dist_cp.FileSystemReader(lowerCamelCase__ ) , ) A_ : Dict = optim_state["""optimizer"""] logger.info(f'Optimizer loaded from {ckpt_dir}' ) A_ : Dict = FSDP.optim_state_dict_to_load(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) optimizer.load_state_dict(lowerCamelCase__ )
686
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Tuple = 'linear' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial' __SCREAMING_SNAKE_CASE : Optional[int] = 'constant' __SCREAMING_SNAKE_CASE : str = 'constant_with_warmup' __SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant' def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) ) return 1.0 return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' A_ : Optional[Any] = {} A_ : Optional[Any] = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A_, A_ : Union[str, Any] = rule_str.split(""":""" ) A_ : Union[str, Any] = int(lowerCamelCase__ ) A_ : List[Any] = float(lowerCamelCase__ ) A_ : Union[str, Any] = value A_ : Optional[int] = float(rule_list[-1] ) def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ): def rule_func(lowerCamelCase__ ) -> float: A_ : str = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowerCamelCase__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) 
% 1.0) )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ): '''simple docstring''' A_ : Optional[Any] = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' ) def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A_ : str = lr_init - lr_end A_ : Tuple = num_training_steps - num_warmup_steps A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps A_ : Optional[int] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase :List[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ): '''simple docstring''' A_ : Optional[Any] = SchedulerType(lowerCamelCase__ ) A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
686
1
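The row above stores LambdaLR-based learning-rate schedules. Below is a minimal, de-obfuscated sketch of the warmup-then-linear-decay multiplier those functions build; the toy model, optimizer, and step counts are illustrative, not taken from the snippet.

import torch
from torch.optim.lr_scheduler import LambdaLR

def linear_warmup_decay(num_warmup_steps, num_training_steps):
    # Multiplier ramps 0 -> 1 over the warmup, then decays linearly to 0.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0,
            float(num_training_steps - current_step)
            / float(max(1, num_training_steps - num_warmup_steps)),
        )
    return lr_lambda

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = LambdaLR(optimizer, linear_warmup_decay(10, 100))
for _ in range(100):
    optimizer.step()
    scheduler.step()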
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowerCamelCase :Optional[Any] = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) lowerCamelCase :Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} lowerCamelCase :Any = '''zero2''' lowerCamelCase :Optional[int] = '''zero3''' lowerCamelCase :int = [ZEROa, ZEROa] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = parameterized.to_safe_name("""_""".join(str(lowerCamelCase__ ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test lowerCamelCase :Tuple = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _lowerCAmelCase ( __UpperCAmelCase ): @parameterized.expand(lowercase , name_func=lowercase ) def _a (self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def _a (self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) def _a (self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def _a (self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def _a (self , lowercase ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def _a (self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ): A_ : str = models[model] A_ : Any = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def _a (self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ): A_ : Dict = self.get_auto_remove_tmp_dir("""./xxx""" , after=lowercase ) A_ : Tuple = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowercase )}\n --per_device_train_batch_size 2\n 
--per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files A_ : Optional[int] = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() A_ : List[Any] = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] A_ : Any = self.get_launcher(lowercase ) A_ : int = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def _a (self , lowercase=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) A_ : Union[str, Any] = min(2 , get_gpu_count() ) if distributed else 1 return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
686
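The DeepSpeed test harness above feeds a custom name_func into parameterized.expand so each generated test gets a readable name. A hedged sketch of that pattern follows; the stage/model values are illustrative, while parameterized.to_safe_name is the helper the snippet itself calls.

import unittest
from parameterized import parameterized

def custom_name_func(func, param_num, param):
    # parameterized calls this as name_func(test_func, param_num, param)
    safe = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{safe}"

class TestExample(unittest.TestCase):
    @parameterized.expand([("zero2", "base"), ("zero3", "robust")], name_func=custom_name_func)
    def test_stage_model(self, stage, model):
        self.assertIn(stage, ("zero2", "zero3"))

if __name__ == "__main__":
    unittest.main()  # generates test_stage_model_zero2_base, test_stage_model_zero3_robust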
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''') lowerCamelCase :int = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowerCamelCase :List[str] = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowerCamelCase :Union[str, Any] = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowerCamelCase :Dict = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowerCamelCase :int = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowerCamelCase :str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowerCamelCase :List[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowerCamelCase :Tuple = [] lowerCamelCase :Dict = [] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for attribute in key.split(""".""" ): A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Any = 
hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Dict = value elif weight_type == "bias": A_ : Dict = value elif weight_type == "running_mean": A_ : Optional[Any] = value elif weight_type == "running_var": A_ : int = value elif weight_type == "num_batches_tracked": A_ : Optional[Any] = value elif weight_type == "weight_ih_l0": A_ : Optional[int] = value elif weight_type == "weight_hh_l0": A_ : Union[str, Any] = value elif weight_type == "bias_ih_l0": A_ : Optional[int] = value elif weight_type == "bias_hh_l0": A_ : Tuple = value elif weight_type == "weight_ih_l1": A_ : Optional[int] = value elif weight_type == "weight_hh_l1": A_ : Dict = value elif weight_type == "bias_ih_l1": A_ : Optional[int] = value elif weight_type == "bias_hh_l1": A_ : Tuple = value else: A_ : Any = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_, A_ : List[str] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": A_ : List[str] = MAPPING_24K elif model_name == "encodec_48khz": A_ : str = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f'{name} was ignored' ) continue A_ : str = False for key, mapped_key in MAPPING.items(): if "*" in key: A_, A_ : List[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: A_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue A_ : Union[str, Any] = True if "*" in mapped_key: A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: A_ : Any = """weight_g""" elif "weight_v" in name: A_ : Tuple = """weight_v""" elif "weight_ih_l0" in name: A_ : Union[str, Any] = """weight_ih_l0""" elif "weight_hh_l0" in name: A_ : Tuple = """weight_hh_l0""" elif "bias_ih_l0" in name: A_ : str = """bias_ih_l0""" elif "bias_hh_l0" in name: A_ : List[Any] = """bias_hh_l0""" elif "weight_ih_l1" in name: A_ : Dict = """weight_ih_l1""" elif "weight_hh_l1" in name: A_ : Any = """weight_hh_l1""" elif "bias_ih_l1" in name: A_ : Optional[int] = """bias_ih_l1""" elif "bias_hh_l1" in name: A_ : List[Any] = """bias_hh_l1""" elif "bias" in name: A_ : List[str] = """bias""" elif "weight" in name: A_ : Optional[int] = """weight""" elif "running_mean" in name: A_ : Union[str, Any] = """running_mean""" elif "running_var" in name: A_ : Optional[int] = """running_var""" elif "num_batches_tracked" in name: A_ : List[Any] = """num_batches_tracked""" else: A_ : str = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue 
if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if config_path is not None: A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Optional[int] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A_ : Dict = [8, 5, 4, 4] A_ : Optional[Any] = [2.2] A_ : Tuple = 64 A_ : Tuple = 3_20_00 A_ : List[Any] = 20_48 A_ : Optional[Any] = False A_ : str = False A_ : Optional[int] = False elif model_name == "encodec_48khz": A_ : Dict = [8, 5, 4, 2] A_ : Tuple = [3.0, 6.0, 12.0, 24.0] A_ : List[Any] = 4_80_00 A_ : Dict = 2 A_ : Dict = False A_ : Dict = """time_group_norm""" A_ : Optional[Any] = True A_ : str = 1.0 A_ : Any = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : Dict = EncodecModel(lowerCamelCase__ ) A_ : Any = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase__ ) A_ : int = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A_ : Tuple = original_checkpoint["""best_state"""] recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCamelCase :Dict = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
686
1
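The EnCodec conversion context above assigns checkpoint tensors via a set_recursively helper that walks a dotted key down to the target parameter. A small de-obfuscated sketch of that idea; the Sequential model and key below are illustrative.

import torch

def set_recursively(model, key, value):
    pointer = model
    for attribute in key.split("."):  # walk e.g. "0.bias" down to the parameter
        pointer = getattr(pointer, attribute)
    if pointer.shape != value.shape:
        raise ValueError(f"Shape mismatch for {key}: {pointer.shape} vs {value.shape}")
    pointer.data = value

model = torch.nn.Sequential(torch.nn.Linear(2, 3))
set_recursively(model, "0.bias", torch.zeros(3))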
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split lowerCamelCase :int = datasets.load_iris() lowerCamelCase :str = np.array(data['''data''']) lowerCamelCase :Dict = np.array(data['''target''']) lowerCamelCase :Union[str, Any] = data['''target_names'''] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ ) # List of distances of all points from the point to be classified A_ : List[str] = [] for data_point in data: A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
686
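The k-NN classifier above reduces to: sort training points by Euclidean distance to the query, then majority-vote over the k nearest labels. A worked sketch with illustrative data:

from collections import Counter
import numpy as np

train = [([0.0, 0.0], 0), ([0.1, 0.2], 0), ([5.0, 5.0], 1), ([5.1, 4.9], 1)]
query, k = [0.2, 0.1], 3

distances = sorted(
    (np.linalg.norm(np.array(point) - np.array(query)), label) for point, label in train
)
votes = [label for _, label in distances[:k]]
print(Counter(votes).most_common(1)[0][0])  # -> 0: two of the three nearest are class 0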
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :Any = logging.get_logger(__name__) lowerCamelCase :Any = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = 'beit' def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ): super().__init__(**lowercase ) A_ : Union[str, Any] = vocab_size A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Tuple = num_attention_heads A_ : List[Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : str = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Dict = initializer_range A_ : str = layer_norm_eps A_ : Any = image_size A_ : int = patch_size A_ : List[str] = num_channels A_ : Any = use_mask_token A_ : Dict = use_absolute_position_embeddings A_ : List[Any] = use_relative_position_bias A_ : Tuple = use_shared_relative_position_bias A_ : Optional[int] = layer_scale_init_value A_ : Tuple = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : Optional[int] = use_auxiliary_head A_ : Union[str, Any] = auxiliary_loss_weight A_ : Tuple = auxiliary_channels A_ : List[Any] = auxiliary_num_convs A_ : Dict = auxiliary_concat_input A_ : Optional[Any] = semantic_loss_ignore_index class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
686
1
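The BEiT OnnxConfig above maps each axis of "pixel_values" to a symbolic name; those are the dynamic axes an ONNX export declares. A hedged sketch of how such a mapping is typically consumed — the tiny conv model and file name are illustrative, not the actual BEiT export path.

import torch

dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
model = torch.nn.Conv2d(3, 8, kernel_size=3)
dummy = torch.randn(1, 3, 224, 224)
torch.onnx.export(
    model, dummy, "tiny.onnx",
    input_names=["pixel_values"],
    dynamic_axes=dynamic_axes,  # names the axes the config marks as variable
)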
'''simple docstring''' import math def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = [] A_ : Optional[Any] = 2 A_ : List[str] = int(math.sqrt(lowerCamelCase__ ) ) # Size of every segment A_ : int = [True] * (end + 1) A_ : int = [] while start <= end: if temp[start] is True: in_prime.append(lowerCamelCase__ ) for i in range(start * start , end + 1 , lowerCamelCase__ ): A_ : Tuple = False start += 1 prime += in_prime A_ : Optional[Any] = end + 1 A_ : List[Any] = min(2 * end , lowerCamelCase__ ) while low <= n: A_ : Tuple = [True] * (high - low + 1) for each in in_prime: A_ : List[Any] = math.floor(low / each ) * each if t < low: t += each for j in range(lowerCamelCase__ , high + 1 , lowerCamelCase__ ): A_ : Tuple = False for j in range(len(lowerCamelCase__ ) ): if temp[j] is True: prime.append(j + low ) A_ : Tuple = high + 1 A_ : Optional[Any] = min(high + end , lowerCamelCase__ ) return prime print(sieve(1_0**6))
686
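The segmented sieve above should agree with a plain sieve of Eratosthenes on its range, so a compact reference version is handy as a cross-check (pure standard library; assumes n >= 2):

def simple_sieve(n):
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

print(simple_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]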
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel lowerCamelCase :Optional[int] = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 1_3_1_0_7_2, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, } def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2 def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2 A_ : List[Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ ) class _lowerCAmelCase ( __UpperCAmelCase ): pass class _lowerCAmelCase ( nn.Module ): def __init__(self , lowercase ): super().__init__() A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 ) A_ : str = deepcopy(self.diffusion ) A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = MODELS_MAP[model_name]["""url"""] os.system(f'wget {url} ./' ) return f'./{model_name}.ckpt' lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } lowerCamelCase :str = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } lowerCamelCase :int = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } lowerCamelCase :List[Any] = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } lowerCamelCase :Optional[Any] = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def a ( lowerCamelCase__ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""" , 
RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'ResConvBlock error with {name}' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def a ( lowerCamelCase__ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ): return name.replace(lowerCamelCase__ , lowerCamelCase__ ) elif name.startswith(lowerCamelCase__ ): return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value] raise ValueError(f'Attn error with {name}' ) def a ( lowerCamelCase__ , lowerCamelCase__=13 ): '''simple docstring''' A_ : Union[str, Any] = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) A_ : Dict = 0 if string.startswith("""net.3.""" ): depth += 1 A_ : int = string[6:] elif string.startswith("""net.""" ): A_ : Tuple = string[4:] while string.startswith("""main.7.""" ): depth += 1 A_ : Dict = string[7:] if string.startswith("""main.""" ): A_ : Union[str, Any] = string[5:] # mid block if string[:2].isdigit(): A_ : Optional[Any] = string[:2] A_ : Optional[Any] = string[2:] else: A_ : List[Any] = string[0] A_ : Dict = string[1:] if depth == max_depth: A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num] A_ : Optional[Any] = """mid_block""" elif depth > 0 and int(lowerCamelCase__ ) < 7: A_ : Any = DOWN_NUM_TO_LAYER[layer_num] A_ : Union[str, Any] = f'down_blocks.{depth}' elif depth > 0 and int(lowerCamelCase__ ) > 7: A_ : List[str] = UP_NUM_TO_LAYER[layer_num] A_ : List[str] = f'up_blocks.{max_depth - depth - 1}' elif depth == 0: A_ : str = DEPTH_0_TO_LAYER[layer_num] A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' 
) A_ : Optional[int] = string_left[1:] if "resnets" in new_layer: A_ : Tuple = convert_resconv_naming(lowerCamelCase__ ) elif "attentions" in new_layer: A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ ) A_ : Dict = new_string_left if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left else: A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue A_ : List[Any] = rename(lowerCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: A_ : int = v return new_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if len(lowerCamelCase__ ) == 1: if len(v.shape ) == 3: # weight A_ : Optional[Any] = v[:, :, 0] else: # bias A_ : Union[str, Any] = v else: # qkv matrices A_ : Optional[int] = v.shape[0] A_ : str = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0] else: A_ : str = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}' A_ : int = download(lowerCamelCase__ ) A_ : Any = MODELS_MAP[model_name]["""sample_rate"""] A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""] A_ : Tuple = Object() A_ : Union[str, Any] = sample_size A_ : Tuple = sample_rate A_ : int = 0 A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ ) A_ : Optional[Any] = diffusers_model.state_dict() A_ : Dict = DiffusionUncond(lowerCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] ) A_ : Any = orig_model.diffusion_ema.eval() A_ : Any = orig_model.state_dict() A_ : List[str] = rename_orig_weights(lowerCamelCase__ ) A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}' assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}' if key == "time_proj.weight": A_ : str = value.squeeze() A_ : Union[str, Any] = value diffusers_model.load_state_dict(lowerCamelCase__ ) A_ : Optional[Any] = 1_00 A_ : Union[str, Any] = 33 A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ ) A_ : List[str] = torch.manual_seed(lowerCamelCase__ ) A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ ) A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1] A_ : List[Any] = get_crash_schedule(lowerCamelCase__ ) A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) A_ : str = torch.manual_seed(33 ) A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} ) A_ : str = generated.clamp(-1 , 1 ) A_ : List[Any] = (generated - audio).abs().sum() A_ : int = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , lowerCamelCase__ ) print("""Diff max""" , lowerCamelCase__ ) assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/' print(f'Conversion for {model_name} successful!' ) if __name__ == "__main__": lowerCamelCase :int = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCamelCase :List[str] = parser.parse_args() main(args)
686
1
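The conversion script in the row above is mostly checkpoint-key renaming driven by prefix tables. A de-obfuscated sketch of that core move; the mapping and keys are illustrative.

PREFIX_MAP = {"main.0": "conv_1", "main.1": "group_norm_1", "skip": "conv_skip"}

def rename_key(name):
    for src, dst in PREFIX_MAP.items():
        if name.startswith(src):
            return dst + name[len(src):]
    raise ValueError(f"No rename rule for {name}")

state_dict = {"main.0.weight": 1, "skip.bias": 2}
print({rename_key(k): v for k, v in state_dict.items()})
# {'conv_1.weight': 1, 'conv_skip.bias': 2}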
'''simple docstring''' import re def a ( lowerCamelCase__ ): '''simple docstring''' if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
686
'''simple docstring''' from math import factorial def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if successes > trials: raise ValueError("""successes must be lower or equal to trials""" ) if trials < 0 or successes < 0: raise ValueError("""the function is defined for non-negative integers""" ) if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise ValueError("""the function is defined for non-negative integers""" ) if not 0 < prob < 1: raise ValueError("""prob has to be in range of 0 - 1""" ) A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) ) coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trials''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
686
1
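The binomial snippet above builds the coefficient from factorials by hand; math.comb gives the same result and makes the demo value easy to verify: P(X = 2) over 4 trials at p = 0.75 is C(4, 2) * 0.75^2 * 0.25^2.

from math import comb

n, k, p = 4, 2, 0.75
print(comb(n, k) * p**k * (1 - p) ** (n - k))  # 6 * 0.5625 * 0.0625 = 0.2109375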
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = { """en""": """Machine learning is great, isn't it?""", """ru""": """Машинное обучение - это здорово, не так ли?""", """de""": """Maschinelles Lernen ist großartig, nicht wahr?""", } # BLUE scores as follows: # "pair": [fairseq, transformers] A_ : Tuple = { """wmt16-en-de-dist-12-1""": [28.3, 27.52], """wmt16-en-de-dist-6-1""": [27.4, 27.11], """wmt16-en-de-12-1""": [26.9, 25.75], } A_ : Optional[int] = f'{src_lang}-{tgt_lang}' A_ : Optional[int] = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. 
`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n' model_card_dir.mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ ) A_ : Union[str, Any] = os.path.join(lowerCamelCase__ , """README.md""" ) print(f'Generating {path}' ) with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowerCamelCase__ ) # make sure we are under the root of the project lowerCamelCase :Tuple = Path(__file__).resolve().parent.parent.parent lowerCamelCase :Optional[Any] = repo_dir / '''model_cards''' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: lowerCamelCase :List[str] = model_cards_dir / '''allenai''' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
686
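The model-card generator above boils down to: format a long template per model and write it to <cards>/<model>/README.md. A minimal sketch of that pattern; the template and paths are illustrative.

from pathlib import Path

TEMPLATE = "---\nlanguage:\n- {src}\n- {tgt}\n---\n\n# {name}\n"

def write_model_card(root, name, src, tgt):
    card_dir = root / name
    card_dir.mkdir(parents=True, exist_ok=True)
    (card_dir / "README.md").write_text(
        TEMPLATE.format(src=src, tgt=tgt, name=name), encoding="utf-8"
    )

write_model_card(Path("model_cards"), "wmt16-en-de-12-1", "en", "de")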
'''simple docstring''' import re def a ( lowerCamelCase__ ): '''simple docstring''' if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
686
1
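A worked example of the str.maketrans complement trick in the snippet above, extended with a reverse complement (a common follow-up, not part of the original):

COMPLEMENT = str.maketrans("ATCG", "TAGC")

dna = "ATGC"
print(dna.translate(COMPLEMENT))        # TACG
print(dna.translate(COMPLEMENT)[::-1])  # GCAT, the reverse complement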
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :Tuple = logging.get_logger(__name__) lowerCamelCase :Optional[Any] = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[Any] = 'gpt_bigcode' __SCREAMING_SNAKE_CASE : Any = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[str] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , lowercase=50257 , lowercase=1024 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=None , lowercase="gelu_pytorch_tanh" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=50256 , lowercase=50256 , lowercase=True , lowercase=True , lowercase=True , **lowercase , ): A_ : Optional[Any] = vocab_size A_ : Dict = n_positions A_ : Tuple = n_embd A_ : List[Any] = n_layer A_ : List[Any] = n_head A_ : Union[str, Any] = n_inner A_ : List[Any] = activation_function A_ : Dict = resid_pdrop A_ : str = embd_pdrop A_ : int = attn_pdrop A_ : List[str] = layer_norm_epsilon A_ : List[str] = initializer_range A_ : str = scale_attn_weights A_ : str = use_cache A_ : int = attention_softmax_in_fpaa A_ : Dict = scale_attention_softmax_in_fpaa A_ : Optional[Any] = multi_query A_ : List[Any] = bos_token_id A_ : Union[str, Any] = eos_token_id super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
686
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowerCamelCase__ ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def a ( ): '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowerCamelCase__ ): http_head("""https://huggingface.co""" )
686
1
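The offline tests above assert that HTTP calls fail under simulated network loss. One hedged way to build such a simulation yourself is to patch requests at the Session level — this mirrors the idea of OfflineSimulationMode, not its actual implementation, and assumes pytest is installed.

from unittest.mock import patch
import pytest
import requests

def test_offline_simulation():
    with patch("requests.Session.request", side_effect=requests.exceptions.ConnectionError):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.get("https://huggingface.co")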
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[Any] = ['input_features', 'is_longer'] def __init__(self , lowercase=64 , lowercase=48000 , lowercase=480 , lowercase=10 , lowercase=1024 , lowercase=0.0 , lowercase=False , lowercase = 0 , lowercase = 14000 , lowercase = None , lowercase = "fusion" , lowercase = "repeatpad" , **lowercase , ): super().__init__( feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , ) A_ : Dict = top_db A_ : Optional[int] = truncation A_ : Optional[Any] = padding A_ : List[Any] = fft_window_size A_ : List[str] = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : str = max_length_s A_ : Dict = max_length_s * sampling_rate A_ : Dict = sampling_rate A_ : List[str] = frequency_min A_ : int = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowercase , min_frequency=lowercase , max_frequency=lowercase , sampling_rate=lowercase , norm=lowercase , mel_scale="""htk""" , ) A_ : Optional[int] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowercase , min_frequency=lowercase , max_frequency=lowercase , sampling_rate=lowercase , norm="""slaney""" , mel_scale="""slaney""" , ) def _a (self ): A_ : Tuple = copy.deepcopy(self.__dict__ ) A_ : List[str] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a (self , lowercase , lowercase = None ): A_ : List[Any] = spectrogram( lowercase , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowercase , log_mel="""dB""" , ) return log_mel_spectrogram.T def _a (self , lowercase , lowercase , lowercase ): A_ : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : Dict = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : Dict = [0] # randomly choose index for each part A_ : Dict = np.random.choice(ranges[0] ) A_ : List[str] = np.random.choice(ranges[1] ) A_ : Union[str, Any] = np.random.choice(ranges[2] ) A_ : str = mel[idx_front : idx_front + chunk_frames, :] A_ : str = mel[idx_middle : idx_middle + chunk_frames, :] A_ : List[str] = mel[idx_back : idx_back + chunk_frames, :] A_ : Union[str, Any] = torch.tensor(mel[None, None, :] ) A_ : List[Any] = torch.nn.functional.interpolate( lowercase , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=lowercase ) A_ : Union[str, Any] = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def _a (self , lowercase , lowercase , lowercase , lowercase ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Optional[Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(lowercase ) - max_length A_ : Optional[Any] = np.random.randint(0 , overflow + 1 ) A_ : Tuple = waveform[idx : idx + max_length] A_ : List[str] = self._np_extract_fbank_features(lowercase , self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Optional[int] = self._np_extract_fbank_features(lowercase , self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Union[str, Any] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : int = np.stack([mel, mel, mel, mel] , axis=0 ) A_ : Optional[int] = False else: A_ : str = self._random_mel_fusion(lowercase , lowercase , lowercase ) A_ : Optional[int] = True else: raise NotImplementedError(F'data_truncating {truncation} not implemented' ) else: A_ : List[Any] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : Any = int(max_length / len(lowercase ) ) A_ : Optional[Any] = np.stack(np.tile(lowercase , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[Any] = int(max_length / len(lowercase ) ) A_ : Union[str, Any] = np.stack(np.tile(lowercase , lowercase ) ) A_ : Any = np.pad(lowercase , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 ) if truncation == "fusion": A_ : Optional[Any] = self._np_extract_fbank_features(lowercase , self.mel_filters ) A_ : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: A_ : Optional[int] = self._np_extract_fbank_features(lowercase , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , **lowercase , ): A_ : List[Any] = truncation if truncation is not None else self.truncation A_ : str = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : List[str] = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A_ : Any = is_batched_numpy or ( isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A_ : List[Any] = [np.asarray(lowercase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowercase , np.ndarray ): A_ : Tuple = np.asarray(lowercase , dtype=np.floataa ) elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Optional[Any] = [np.asarray(lowercase )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : Optional[Any] = [ self._get_input_mel(lowercase , max_length if max_length else self.nb_max_samples , lowercase , lowercase ) for waveform in raw_speech ] A_ : Optional[int] = [] A_ : Union[str, Any] = [] for mel, longer in padded_inputs: input_mel.append(lowercase ) is_longer.append(lowercase ) if truncation == "fusion" and sum(lowercase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : Optional[Any] = np.random.randint(0 , len(lowercase ) ) A_ : Union[str, Any] = True if isinstance(input_mel[0] , lowercase ): A_ : List[str] = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[Any] = [[longer] for longer in is_longer] A_ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(lowercase ) if return_tensors is not None: A_ : List[Any] = input_features.convert_to_tensors(lowercase ) return input_features
686
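The CLAP feature extractor above pads short waveforms with a "repeatpad" strategy: tile the signal as many whole times as fits, then zero-pad the remainder. A de-obfuscated sketch; the array values are illustrative.

import numpy as np

def repeatpad(waveform, max_length):
    n_repeat = max_length // len(waveform)
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant", constant_values=0)

print(repeatpad(np.array([1.0, 2.0, 3.0]), 8))  # [1. 2. 3. 1. 2. 3. 0. 0.]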
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCamelCase :Any = re.compile(R'''\s+''') def a ( lowerCamelCase__ ): '''simple docstring''' return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def a ( lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""] A_ : Optional[int] = example["""content"""].splitlines() for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ): '''simple docstring''' A_ : Any = ["""unit tests""", """test file""", """configuration file"""] A_ : List[str] = example["""content"""].splitlines() A_ : str = 0 A_ : Union[str, Any] = 0 # first test for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test A_ : List[Any] = example["""content"""].count("""\n""" ) A_ : Any = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = ["""def """, """class """, """for """, """while """] A_ : Optional[int] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def a ( lowerCamelCase__ , lowerCamelCase__=4 ): '''simple docstring''' A_ : Tuple = example["""content"""].splitlines() A_ : int = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""] A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ ) return {"ratio": ratio} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Any = {} results.update(get_hash(lowerCamelCase__ ) ) results.update(line_stats(lowerCamelCase__ ) ) results.update(alpha_stats(lowerCamelCase__ ) ) results.update(char_token_ratio(lowerCamelCase__ ) ) results.update(is_autogenerated(lowerCamelCase__ ) ) results.update(is_config_or_test(lowerCamelCase__ ) ) 
results.update(has_no_keywords(lowerCamelCase__ ) ) results.update(has_few_assignments(lowerCamelCase__ ) ) return results def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def a ( lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ , """rb""" ) as f_in: with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ ) os.unlink(lowerCamelCase__ ) # Settings lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments) lowerCamelCase :Tuple = parser.parse_args() if args.num_workers is None: lowerCamelCase :Tuple = multiprocessing.cpu_count() lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCamelCase :List[Any] = time.time() lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCamelCase :int = time.time() lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCamelCase :int = set(ds.unique('''hash''')) lowerCamelCase :List[str] = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCamelCase :Dict = time.time() lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCamelCase :List[str] = time.time() lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCamelCase :int = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCamelCase :Tuple = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCamelCase :Tuple = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json") lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
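The exact-deduplication step above hashes whitespace-stripped content and keeps only the first occurrence of each digest. A self-contained sketch of that idea, assuming the obfuscated `mda` in `get_hash` stands for `md5`:

import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text):
    # Hash the content with all whitespace removed, so formatting-only
    # differences collapse to the same digest (as in get_hash above).
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

def exact_dedup(texts):
    # Keep the first occurrence of each hash, drop later duplicates.
    seen, kept = set(), []
    for t in texts:
        h = content_hash(t)
        if h not in seen:
            seen.add(h)
            kept.append(t)
    return kept

samples = ["def f():\n    return 1", "def f():\n  return 1", "def g(): pass"]
print(len(exact_dedup(samples)))  # 2 -- the two f() variants collide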
686
1
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy lowerCamelCase :Dict = logging.getLogger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , ): '''simple docstring''' A_ : Optional[int] = bnb_quantization_config.load_in_abit A_ : List[Any] = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) A_ : str = [] # custom device map if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(device_map.keys() ) > 1: A_ : int = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A_ : List[str] = get_keys_to_not_convert(lowerCamelCase__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(lowerCamelCase__ ) A_ : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A_ : str = [] A_ : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(lowerCamelCase__ ) # compatibility with peft A_ : int = load_in_abit A_ : Any = load_in_abit A_ : Any = get_parameter_device(lowerCamelCase__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) A_ : List[str] = replace_with_bnb_layers(lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ ) # convert param to the right dtype A_ : List[Any] = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A_ : List[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) A_ : int = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(lowerCamelCase__ ): param.to(lowerCamelCase__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f'The model device type is {model_device.type}. However, cuda is needed for quantization.' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): A_ : Dict = replace_with_bnb_layers( lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ ) A_ : Optional[Any] = get_quantized_model_device_map( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , max_memory=lowerCamelCase__ , no_split_module_classes=lowerCamelCase__ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A_ : Optional[int] = True A_ : Optional[int] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCamelCase__ , offload_state_dict=lowerCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(lowerCamelCase__ , device_map=lowerCamelCase__ , offload_dir=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ): '''simple docstring''' if device_map is None: if torch.cuda.is_available(): A_ : Dict = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) A_ : int = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A_ : Tuple = {} A_ : int = special_dtypes A_ : Tuple = no_split_module_classes A_ : Dict = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A_ : Optional[Any] = get_balanced_memory( lowerCamelCase__ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowerCamelCase__ , **lowerCamelCase__ , ) A_ : Dict = max_memory A_ : Tuple = infer_auto_device_map(lowerCamelCase__ , **lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): # check if don't have any quantized module on the cpu A_ : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A_ : Union[str, Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ): '''simple docstring''' if modules_to_not_convert is None: A_ : List[Any] = [] A_, A_ : int = _replace_with_bnb_layers( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' A_ : int = False for name, module in model.named_children(): if current_key_name is None: A_ : List[Any] = [] current_key_name.append(lowerCamelCase__ ) if isinstance(lowerCamelCase__ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A_ : Dict = """.""".join(lowerCamelCase__ ) A_ : Optional[int] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: A_ : List[Any] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A_ : List[str] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCamelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A_ : Any = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) A_ : Any = module.weight.data if module.bias is not None: A_ : List[str] = module.bias.data bnb_module.requires_grad_(lowerCamelCase__ ) setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = True if len(list(module.children() ) ) > 0: A_, A_ : Any = _replace_with_bnb_layers( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( lowerCamelCase__ ): '''simple docstring''' with init_empty_weights(): A_ : str = deepcopy(lowerCamelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A_ : Any = find_tied_parameters(lowerCamelCase__ ) # For compatibility with Accelerate < 0.18 if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A_ : Optional[int] = sum(lowerCamelCase__ , [] ) A_ : Union[str, Any] = len(lowerCamelCase__ ) > 0 # Check if it is a base model A_ : int = False if hasattr(lowerCamelCase__ , """base_model_prefix""" ): A_ : int = not hasattr(lowerCamelCase__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A_ : List[Any] = list(model.named_children() ) A_ : int = [list_modules[-1][0]] # add last module together with tied weights A_ : Tuple = set(lowerCamelCase__ ) - set(lowerCamelCase__ ) A_ : Union[str, Any] = list(set(lowerCamelCase__ ) ) + list(lowerCamelCase__ ) # remove ".weight" from the keys A_ : Union[str, Any] = [""".weight""", """.bias"""] A_ : List[str] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A_ : Any = name.replace(lowerCamelCase__ , """""" ) filtered_module_names.append(lowerCamelCase__ ) return filtered_module_names def a ( lowerCamelCase__ ): '''simple docstring''' for m in model.modules(): if isinstance(lowerCamelCase__ , bnb.nn.Linearabit ): return True return False def a ( lowerCamelCase__ ): '''simple docstring''' return next(parameter.parameters() ).device def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if fpaa_statistics is None: set_module_tensor_to_device(lowerCamelCase__ , lowerCamelCase__ , 0 , dtype=lowerCamelCase__ , value=lowerCamelCase__ ) A_ : Any = param_name A_ : Dict = model if "." 
in tensor_name: A_ : str = tensor_name.split(""".""" ) for split in splits[:-1]: A_ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) A_ : str = new_module A_ : Dict = splits[-1] # offload weights A_ : str = False offload_weight(module._parameters[tensor_name] , lowerCamelCase__ , lowerCamelCase__ , index=lowerCamelCase__ ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowerCamelCase__ , index=lowerCamelCase__ , ) else: offload_weight(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index=lowerCamelCase__ ) offload_weight(lowerCamelCase__ , param_name.replace("""weight""" , """SCB""" ) , lowerCamelCase__ , index=lowerCamelCase__ ) set_module_tensor_to_device(lowerCamelCase__ , lowerCamelCase__ , """meta""" , dtype=lowerCamelCase__ , value=torch.empty(*param.size() ) )
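The core of `_replace_with_bnb_layers` above is a recursive walk over `named_children` that swaps `nn.Linear` modules for quantized replacements while copying weights across. A minimal sketch of that traversal with a stand-in class (the real code uses bitsandbytes layers; the stub here is purely illustrative):

import torch.nn as nn

class StubQuantLinear(nn.Linear):
    """Stand-in for a bitsandbytes linear layer; the real replacement would
    be bnb.nn.Linear8bitLt or bnb.nn.Linear4bit."""
    pass

def replace_linear(model, skip=()):
    # Recursively swap nn.Linear children for the quantized stand-in,
    # mirroring the traversal in _replace_with_bnb_layers above.
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new = StubQuantLinear(module.in_features, module.out_features,
                                  bias=module.bias is not None)
            new.weight.data = module.weight.data
            if module.bias is not None:
                new.bias.data = module.bias.data
            setattr(model, name, new)
        elif len(list(module.children())) > 0:
            replace_linear(module, skip)
    return model

net = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
replace_linear(net)
print(net)  # both Linear layers are now StubQuantLinear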
686
'''simple docstring''' import pytest lowerCamelCase :Optional[Any] = '''__dummy_dataset1__''' lowerCamelCase :List[Any] = ''' import json import os import datasets REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", ] ) ), "langs": datasets.Sequence(datasets.Value("string")), "spans": datasets.Sequence(datasets.Value("string")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), ] def _generate_examples(self, filepath): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, json.loads(line) ''' @pytest.fixture def a ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def a ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = dataset_loading_script_name A_ : int = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase__ ) A_ : Tuple = script_dir / f'{script_name}.py' with open(lowerCamelCase__ , """w""" ) as f: f.write(lowerCamelCase__ ) return str(lowerCamelCase__ )
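A pytest-only sanity check of the layout the last fixture builds (tmp_path / "datasets" / script_name / f"{script_name}.py"); the test name and stub contents are illustrative, not from the original suite:

def test_script_is_written(tmp_path):
    # Mirror what the fixture does: create datasets/<script_name>/ under the
    # pytest temp dir and write the loading script into it.
    script_name = "__dummy_dataset1__"
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    script_path.write_text("# stub dataset loading script")
    assert script_path.exists()
    assert script_path.read_text().startswith("#")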
686
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = LDMTextToImagePipeline __SCREAMING_SNAKE_CASE : int = TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } __SCREAMING_SNAKE_CASE : Optional[int] = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } __SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Tuple = False def _a (self ): torch.manual_seed(0 ) A_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) A_ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) A_ : List[Any] = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) A_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A_ : Optional[Any] = CLIPTextModel(lowercase ) A_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A_ : Dict = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def _a (self , lowercase , lowercase=0 ): if str(lowercase ).startswith("""mps""" ): A_ : Tuple = torch.manual_seed(lowercase ) else: A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a (self ): A_ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : Tuple = self.get_dummy_components() A_ : Tuple = LDMTextToImagePipeline(**lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : int = self.get_dummy_inputs(lowercase ) A_ : Any = pipe(**lowercase ).images A_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) A_ : str = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self , lowercase , lowercase=torch.floataa 
, lowercase=0 ): A_ : Tuple = torch.manual_seed(lowercase ) A_ : Dict = np.random.RandomState(lowercase ).standard_normal((1, 4, 32, 32) ) A_ : Optional[Any] = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase ) A_ : str = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a (self ): A_ : int = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Any = self.get_inputs(lowercase ) A_ : str = pipe(**lowercase ).images A_ : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) A_ : str = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] ) A_ : Optional[Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self , lowercase , lowercase=torch.floataa , lowercase=0 ): A_ : Optional[Any] = torch.manual_seed(lowercase ) A_ : Dict = np.random.RandomState(lowercase ).standard_normal((1, 4, 32, 32) ) A_ : Any = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase ) A_ : Any = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a (self ): A_ : Optional[Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Optional[Any] = self.get_inputs(lowercase ) A_ : Tuple = pipe(**lowercase ).images[0] A_ : Optional[int] = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) A_ : Any = np.abs(expected_image - image ).max() assert max_diff < 1E-3
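All three test classes above share one assertion idiom: rather than pinning the whole output image, they compare a fixed 3x3 corner of the last channel against hard-coded reference values. A numpy-only sketch of that helper (names are illustrative):

import numpy as np

def assert_slice_close(image, expected_slice, atol=1e-3):
    # Compare only the bottom-right 3x3 patch of the last channel, the same
    # region the pipeline tests above pin with hard-coded numbers.
    actual = image[0, -3:, -3:, -1].flatten()
    assert np.abs(actual - expected_slice).max() < atol

rng = np.random.default_rng(0)
img = rng.random((1, 16, 16, 3)).astype(np.float32)
assert_slice_close(img, img[0, -3:, -3:, -1].flatten())  # trivially passes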
686
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split lowerCamelCase :int = datasets.load_iris() lowerCamelCase :str = np.array(data['''data''']) lowerCamelCase :Dict = np.array(data['''target''']) lowerCamelCase :Union[str, Any] = data['''target_names'''] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ): '''simple docstring''' A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ ) # List of distances of all points from the point to be classified A_ : List[str] = [] for data_point in data: A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
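The classifier above computes one Euclidean distance per training point in a Python-level loop; the same k-NN vote can be written as a single vectorized distance computation. A sketch with a tiny synthetic check:

import numpy as np
from collections import Counter

def knn_predict(X_train, y_train, query, k=5):
    # Vectorized variant of the classifier above: one norm over the whole
    # training set, then a majority vote among the k nearest labels.
    dists = np.linalg.norm(X_train - np.asarray(query), axis=1)
    nearest = y_train[np.argsort(dists)[:k]]
    return Counter(nearest.tolist()).most_common(1)[0][0]

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
y = np.array([0, 0, 1, 1])
print(knn_predict(X, y, [0.05, 0.0], k=3))  # -> 0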
686
1
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowerCamelCase :Any = [ '''kernels/rwkv/wkv_cuda.cu''', '''kernels/rwkv/wkv_op.cpp''', '''kernels/deformable_detr/ms_deform_attn.h''', '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''', '''models/graphormer/algos_graphormer.pyx''', ] def a ( lowerCamelCase__ ): '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowerCamelCase :Tuple = argparse.ArgumentParser() parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''') lowerCamelCase :List[Any] = parser.parse_args() if args.check_lib: lowerCamelCase :Union[str, Any] = importlib.import_module('''transformers''') lowerCamelCase :Union[str, Any] = Path(transformers_module.__file__).parent else: lowerCamelCase :List[str] = Path.cwd() / '''build/lib/transformers''' if not test_custom_files_are_present(transformers_path): raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
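The same existence check generalizes to any packaging sanity test; a stripped-down variant with a hypothetical asset list of its own:

from pathlib import Path

# Hypothetical asset list; the real script checks CUDA kernels and .pyx files.
REQUIRED_FILES = ["kernels/demo/op.cpp", "models/demo/algos.pyx"]

def all_assets_present(root):
    # True only if every listed non-Python asset survived packaging.
    return all((Path(root) / f).exists() for f in REQUIRED_FILES)

print(all_assets_present("."))  # False unless the demo files exist locally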
686
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): def _a (self , lowercase ): if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()] return labels def __call__(self , lowercase , lowercase , lowercase ): if len(lowercase ) == 0 or len(lowercase ) == 0: raise ValueError("""You must include at least one label and at least one sequence.""" ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """ """Make sure the passed template includes formatting syntax such as {{}} where the label should go.""" ).format(lowercase ) ) if isinstance(lowercase , lowercase ): A_ : Tuple = [sequences] A_ : int = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ): A_ : int = args_parser super().__init__(*lowercase , **lowercase ) if self.entailment_id == -1: logger.warning( """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """ """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" ) @property def _a (self ): for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("""entail""" ): return ind return -1 def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ): A_ : Any = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """ """ `pad_token=eos_token`""" ) A_ : str = self.tokenizer.eos_token try: A_ : str = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , ) except Exception as e: if "too short" in str(lowercase ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. A_ : Any = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def _a (self , **lowercase ): if kwargs.get("""multi_class""" , lowercase ) is not None: A_ : Tuple = kwargs["""multi_class"""] logger.warning( """The `multi_class` argument has been deprecated and renamed to `multi_label`. 
""" """`multi_class` will be removed in a future version of Transformers.""" ) A_ : Optional[Any] = {} if "candidate_labels" in kwargs: A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] ) if "hypothesis_template" in kwargs: A_ : List[str] = kwargs["""hypothesis_template"""] A_ : List[Any] = {} if "multi_label" in kwargs: A_ : Optional[Any] = kwargs["""multi_label"""] return preprocess_params, {}, postprocess_params def __call__(self , lowercase , *lowercase , **lowercase , ): if len(lowercase ) == 0: pass elif len(lowercase ) == 1 and "candidate_labels" not in kwargs: A_ : Union[str, Any] = args[0] else: raise ValueError(F'Unable to understand extra arguments {args}' ) return super().__call__(lowercase , **lowercase ) def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ): A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ): A_ : List[Any] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowercase ) - 1, **model_input, } def _a (self , lowercase ): A_ : Optional[Any] = inputs["""candidate_label"""] A_ : List[Any] = inputs["""sequence"""] A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names} A_ : List[str] = self.model(**lowercase ) A_ : str = { """candidate_label""": candidate_label, """sequence""": sequence, """is_last""": inputs["""is_last"""], **outputs, } return model_outputs def _a (self , lowercase , lowercase=False ): A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs] A_ : str = [outputs["""sequence"""] for outputs in model_outputs] A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] ) A_ : Dict = logits.shape[0] A_ : Any = len(lowercase ) A_ : List[str] = N // n A_ : Tuple = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowercase ) == 1: # softmax over the entailment vs. contradiction dim for each label independently A_ : Union[str, Any] = self.entailment_id A_ : Any = -1 if entailment_id == 0 else 0 A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]] A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Optional[Any] = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels A_ : Optional[int] = reshaped_outputs[..., self.entailment_id] A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase ) A_ : Any = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
686
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :Any = logging.get_logger(__name__) lowerCamelCase :Any = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = 'beit' def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ): super().__init__(**lowercase ) A_ : Union[str, Any] = vocab_size A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Tuple = num_attention_heads A_ : List[Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : str = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Dict = initializer_range A_ : str = layer_norm_eps A_ : Any = image_size A_ : int = patch_size A_ : List[str] = num_channels A_ : Any = use_mask_token A_ : Dict = use_absolute_position_embeddings A_ : List[Any] = use_relative_position_bias A_ : Tuple = use_shared_relative_position_bias A_ : Optional[int] = layer_scale_init_value A_ : Tuple = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : Optional[int] = use_auxiliary_head A_ : Union[str, Any] = auxiliary_loss_weight A_ : Tuple = auxiliary_channels A_ : List[Any] = auxiliary_num_convs A_ : Dict = auxiliary_concat_input A_ : Optional[Any] = semantic_loss_ignore_index class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
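The obfuscated class above corresponds to transformers' `BeitConfig`; since configs are plain containers, overrides can be verified with a quick round-trip (a sketch, assuming a recent transformers install):

from transformers import BeitConfig

# Override two of the defaults shown above and confirm they serialize.
config = BeitConfig(image_size=384, use_auxiliary_head=False)
print(config.image_size)               # 384
print(config.to_dict()["model_type"])  # 'beit'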
686
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :int = logging.get_logger(__name__) lowerCamelCase :Tuple = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'yolos' def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ): super().__init__(**lowercase ) A_ : List[Any] = hidden_size A_ : Dict = num_hidden_layers A_ : Any = num_attention_heads A_ : Any = intermediate_size A_ : int = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : List[str] = initializer_range A_ : Optional[Any] = layer_norm_eps A_ : List[str] = image_size A_ : str = patch_size A_ : int = num_channels A_ : Optional[int] = qkv_bias A_ : List[Any] = num_detection_tokens A_ : Tuple = use_mid_position_embeddings A_ : int = auxiliary_loss # Hungarian matcher A_ : int = class_cost A_ : List[Any] = bbox_cost A_ : Optional[int] = giou_cost # Loss coefficients A_ : Any = bbox_loss_coefficient A_ : List[Any] = giou_loss_coefficient A_ : str = eos_coefficient class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4 @property def _a (self ): return 12
686
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = StableDiffusionInpaintPipeline __SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Any = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : str = frozenset([] ) def _a (self ): torch.manual_seed(0 ) A_ : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase , ) A_ : Any = PNDMScheduler(skip_prk_steps=lowercase ) torch.manual_seed(0 ) A_ : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) A_ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) A_ : Dict = CLIPTextModel(lowercase ) A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a (self , lowercase , lowercase=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched A_ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Dict = Image.fromarray(np.uinta(lowercase ) ).convert("""RGB""" ).resize((64, 64) ) A_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) ) if str(lowercase ).startswith("""mps""" ): A_ : Dict = torch.manual_seed(lowercase ) else: A_ : List[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _a (self ): A_ : List[str] = """cpu""" # ensure 
determinism for the device-dependent torch.Generator A_ : int = self.get_dummy_components() A_ : List[str] = StableDiffusionInpaintPipeline(**lowercase ) A_ : Optional[int] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : Tuple = self.get_dummy_inputs(lowercase ) A_ : List[str] = sd_pipe(**lowercase ).images A_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _a (self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self ): A_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A_ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) A_ : Union[str, Any] = """stabilityai/stable-diffusion-2-inpainting""" A_ : str = StableDiffusionInpaintPipeline.from_pretrained(lowercase , safety_checker=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : List[str] = """Face of a yellow cat, high resolution, sitting on a park bench""" A_ : Optional[Any] = torch.manual_seed(0 ) A_ : Optional[Any] = pipe( prompt=lowercase , image=lowercase , mask_image=lowercase , generator=lowercase , output_type="""np""" , ) A_ : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9E-3 def _a (self ): A_ : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A_ : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A_ : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) A_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" A_ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained( lowercase , torch_dtype=torch.floataa , safety_checker=lowercase , ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : int = """Face of a yellow cat, high resolution, sitting on a park bench""" A_ : Tuple = torch.manual_seed(0 ) A_ : Any = pipe( prompt=lowercase , image=lowercase , mask_image=lowercase , generator=lowercase , output_type="""np""" , ) A_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _a (self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) A_ : Optional[int] = load_image( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) A_ : str = """stabilityai/stable-diffusion-2-inpainting""" A_ : Any = PNDMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained( lowercase , safety_checker=lowercase , scheduler=lowercase , torch_dtype=torch.floataa , ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ : str = """Face of a yellow cat, high resolution, sitting on a park bench""" A_ : Tuple = torch.manual_seed(0 ) A_ : Optional[Any] = pipe( prompt=lowercase , image=lowercase , mask_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type="""np""" , ) A_ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
686
'''simple docstring''' from jiwer import compute_measures import datasets lowerCamelCase :int = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCamelCase :int = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' lowerCamelCase :Optional[Any] = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def _a (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def _a (self , lowercase=None , lowercase=None , lowercase=False ): if concatenate_texts: return compute_measures(lowercase , lowercase )["wer"] else: A_ : List[Any] = 0 A_ : Optional[int] = 0 for prediction, reference in zip(lowercase , lowercase ): A_ : Any = compute_measures(lowercase , lowercase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
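For readers without jiwer installed, the docstring's formula WER = (S + D + I) / N is just a word-level edit distance divided by the reference length. A hand-rolled check on one of the docstring's own sentence pairs:

def word_error_rate(reference, hypothesis):
    r, h = reference.split(), hypothesis.split()
    # Classic dynamic-programming edit distance over words.
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,          # deletion
                          d[i][j - 1] + 1,          # insertion
                          d[i - 1][j - 1] + cost)   # substitution / hit
    return d[len(r)][len(h)] / len(r)

print(word_error_rate("this is the reference", "this is the prediction"))  # 0.25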
686
1
'''simple docstring''' from jiwer import compute_measures import datasets lowerCamelCase :int = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCamelCase :int = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' lowerCamelCase :Optional[Any] = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def _a (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def _a (self , lowercase=None , lowercase=None , lowercase=False ): if concatenate_texts: return compute_measures(lowercase , lowercase )["wer"] else: A_ : List[Any] = 0 A_ : Optional[int] = 0 for prediction, reference in zip(lowercase , lowercase ): A_ : Any = compute_measures(lowercase , lowercase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
686
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline __SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } __SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'} __SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _a (self ): torch.manual_seed(0 ) A_ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) A_ : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) A_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) A_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A_ : int = CLIPTextModel(lowercase ) A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A_ : Any = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _a (self , lowercase , lowercase=0 ): A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : int = image / 2 + 0.5 if str(lowercase ).startswith("""mps""" ): A_ : int = torch.manual_seed(lowercase ) else: A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : Union[str, Any] = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _a (self ): A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : Optional[Any] = self.get_dummy_components() A_ : Any 
= CycleDiffusionPipeline(**lowercase ) A_ : int = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : int = self.get_dummy_inputs(lowercase ) A_ : str = pipe(**lowercase ) A_ : str = output.images A_ : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _a (self ): A_ : Dict = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase , """half""" ): A_ : List[str] = module.half() A_ : List[Any] = CycleDiffusionPipeline(**lowercase ) A_ : Optional[Any] = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : Any = self.get_dummy_inputs(lowercase ) A_ : Tuple = pipe(**lowercase ) A_ : List[str] = output.images A_ : Union[str, Any] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _a (self ): return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def _a (self ): return super().test_inference_batch_single_identical() @skip_mps def _a (self ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def _a (self ): return super().test_save_load_optional_components() @skip_mps def _a (self ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a (self ): A_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) A_ : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) A_ : List[str] = init_image.resize((512, 512) ) A_ : Dict = """CompVis/stable-diffusion-v1-4""" A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : Any = CycleDiffusionPipeline.from_pretrained( lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : str = """A black colored car""" A_ : Dict = """A blue colored car""" A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Optional[int] = pipe( prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , ) A_ : str = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def _a (self ): A_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) A_ : Optional[int] = init_image.resize((512, 512) ) A_ : Optional[int] = 
"""CompVis/stable-diffusion-v1-4""" A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" ) A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : Optional[Any] = """A black colored car""" A_ : int = """A blue colored car""" A_ : str = torch.manual_seed(0 ) A_ : Any = pipe( prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , ) A_ : int = output.images assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring'''


def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with blocks of
    minimum length three, separated by at least one empty unit."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
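# Quick regression check for solution() above: the Project Euler 114 statement
# reports exactly seventeen ways to fill a row of length seven.
assert solution(7) == 17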
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[str] = 'down' def _a (self ): A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'down' def _a (self ): A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' def _a (self ): A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' def _a (self ): A_, A_ : str = super().prepare_init_args_and_inputs_for_common() A_ : Optional[Any] = 32 return init_dict, inputs_dict def _a (self ): A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Union[str, Any] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _a (self ): A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'down' @property def _a (self ): return super().get_dummy_input(include_skip_sample=lowercase ) def _a (self ): A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Union[str, Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_skip_sample=lowercase ) def _a (self ): A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Any = 'down' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : int = { """in_channels""": 32, """out_channels""": 32, } A_ : Any = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, 
-0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Optional[Any] = 'down' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : Optional[Any] = { """in_channels""": 32, """out_channels""": 32, } A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'mid' def _a (self ): A_ : Optional[Any] = { """in_channels""": 32, """temb_channels""": 128, } A_ : Any = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405 __SCREAMING_SNAKE_CASE : Optional[int] = 'mid' def _a (self ): A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common() A_ : List[str] = 32 return init_dict, inputs_dict def _a (self ): A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'mid' @property def _a (self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common() A_ : Optional[int] = 32 return init_dict, inputs_dict def _a (self ): A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : str = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Any = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Union[str, Any] = 32 return init_dict, inputs_dict def _a (self ): A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] 
super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase ) def _a (self ): A_, A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : int = 32 return init_dict, inputs_dict def _a (self ): A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[str] = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _a (self ): A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Tuple = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : List[Any] = 'up' @property def _a (self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase ) def _a (self ): A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : str = 'up' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32} A_ : Optional[int] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(lowercase ) class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405 __SCREAMING_SNAKE_CASE : Dict = 'up' @property def _a (self ): return super().get_dummy_input(include_temb=lowercase ) def _a (self ): A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32} A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a (self ): A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(lowercase )
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
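# Usage sketch (assuming the restored transformers names above; the config
# class is importable from the top-level transformers package):
from transformers import PoolFormerConfig

config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512])
print(config.model_type)  # -> "poolformer"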
'''simple docstring'''

from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be built by concatenating words from
    `word_bank` (a tabulation-based word-break)."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
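# Usage sketch: the classic word-break example. With the names restored above,
# "purple" decomposes in exactly two ways over this word bank:
assert all_construct("purple", ["purp", "p", "ur", "le", "purpl"]) == [
    ["purp", "le"],
    ["p", "ur", "p", "le"],
]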
'''simple docstring''' import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowerCamelCase :Any = logging.getLogger() def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} A_ : str = os.path.join(lowerCamelCase__ , """all_results.json""" ) if os.path.exists(lowerCamelCase__ ): with open(lowerCamelCase__ , """r""" ) as f: A_ : Any = json.load(lowerCamelCase__ ) else: raise ValueError(f'can\'t find {path}' ) return results lowerCamelCase :Dict = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class _lowerCAmelCase ( __UpperCAmelCase ): def _a (self ): import xla_spawn A_ : int = self.get_auto_remove_tmp_dir() A_ : Optional[Any] = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(lowercase , """argv""" , lowercase ): A_ : str = time() xla_spawn.main() A_ : Optional[Any] = time() A_ : List[Any] = get_results(lowercase ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def _a (self ): import xla_spawn A_ : Union[str, Any] = """ ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py """.split() with patch.object(lowercase , """argv""" , lowercase ): xla_spawn.main()
'''simple docstring'''


def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
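# Usage sketch for is_balanced() above: properly nested pairs pass, while
# interleaved pairs fail.
assert is_balanced("([]{})") is True
assert is_balanced("([)]") is False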
'''simple docstring'''


def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the most significant set bit of
    `number` (0 for an input of 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
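# Usage sketch (using the descriptive name chosen above): the result matches
# int.bit_length() for non-negative integers.
assert get_highest_set_bit_position(64) == 7 == (64).bit_length()
assert get_highest_set_bit_position(0) == 0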
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ): super().__init__( lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , ) A_ : Optional[int] = field A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths} A_ : Optional[Any] = Json( cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , ) def _a (self ): # Build iterable dataset if self.streaming: A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ : int = None A_ : Union[str, Any] = None A_ : int = None A_ : List[str] = None self.builder.download_and_prepare( download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , ) A_ : str = self.builder.as_dataset( split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory ) return dataset class _lowerCAmelCase : def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ): if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' ) A_ : Any = dataset A_ : List[str] = path_or_buf A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A_ : Optional[Any] = num_proc A_ : List[Any] = """utf-8""" A_ : int = to_json_kwargs def _a (self ): A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase ) A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" ) A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'`datasets` currently does not support {compression} compression' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer: A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'The compression parameter is not supported when writing to a buffer, but compression={compression}' """ was passed. 
Please provide a local path instead.""" ) A_ : Union[str, Any] = self._write( file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs ) return written def _a (self , lowercase ): A_, A_, A_, A_, A_ : List[str] = args A_ : List[str] = query_table( table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , ) A_ : Any = batch.to_pandas().to_json( path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ): A_ : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(lowercase ) else: A_, A_ : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(lowercase ) return written
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__(self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ): A_ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} A_ : str = parent A_ : Optional[int] = batch_size A_ : Any = num_channels A_ : Tuple = image_size A_ : Any = min_resolution A_ : Tuple = max_resolution A_ : List[str] = do_resize A_ : int = size A_ : str = do_normalize A_ : str = image_mean A_ : List[Any] = image_std def _a (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = DPTImageProcessor if is_vision_available() else None def _a (self ): A_ : Optional[Any] = DPTImageProcessingTester(self ) @property def _a (self ): return self.image_processor_tester.prepare_image_processor_dict() def _a (self ): A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , """image_mean""" ) ) self.assertTrue(hasattr(lowercase , """image_std""" ) ) self.assertTrue(hasattr(lowercase , """do_normalize""" ) ) self.assertTrue(hasattr(lowercase , """do_resize""" ) ) self.assertTrue(hasattr(lowercase , """size""" ) ) def _a (self ): A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) A_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def _a (self ): # Initialize image_processing A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched A_ : Any = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _a (self ): # Initialize image_processing A_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , 
np.ndarray ) # Test not batched input A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched A_ : Any = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _a (self ): # Initialize image_processing A_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input A_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched A_ : Optional[Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
'''simple docstring''' import os import sys import unittest lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Tuple = get_test_to_tester_mapping(lowercase ) A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase ) A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""} A_ : Union[str, Any] = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) def _a (self ): A_ : Optional[Any] = get_model_to_test_mapping(lowercase ) A_ : List[str] = get_model_to_test_mapping(lowercase ) A_ : Dict = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } A_ : Any = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) def _a (self ): A_ : List[Any] = get_model_to_tester_mapping(lowercase ) A_ : Optional[int] = get_model_to_tester_mapping(lowercase ) A_ : Dict = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } A_ : Dict = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
'''simple docstring'''

cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Memoised count of attendance strings of length `days` that still
    qualify for a prize, given the current counters."""
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime

    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
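# Quick regression check for solution() above: the Project Euler 191 statement
# counts exactly forty-three prize strings over a four-day period.
assert solution(4) == 43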
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
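# Usage sketch of the lazy-module pattern above: importing the package stays
# cheap, and the heavy torch/flax submodules are loaded only when one of the
# exported names is first touched, e.g. (assuming the usual transformers layout):
#
#     from transformers.models.longt5 import LongT5Config  # triggers the real import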
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' A_, A_ : Optional[Any] = emb.weight.shape A_ : str = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) A_ : Optional[int] = emb.weight.data return lin_layer def a ( lowerCamelCase__ , lowerCamelCase__=None ): '''simple docstring''' A_ : str = {} for old_key in state_dict.keys(): A_ : Optional[int] = old_key if "moe_layer.experts." in key: if expert_idx is not None: A_ : int = key.replace("""moe_layer.experts.0""" , f'ffn.experts.expert_{expert_idx}' ) else: A_ : List[str] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: A_ : int = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: A_ : Dict = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: A_ : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: A_ : List[Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: A_ : Tuple = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: A_ : List[str] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) A_ : Any = state_dict[old_key] return new_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = WEIGHTS_NAME ): '''simple docstring''' A_ : Any = [] A_ : Dict = 0 os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) for expert in range(lowerCamelCase__ ): A_ : Optional[Any] = switch_checkpoint_path + f'-rank-{expert}.pt' if os.path.isfile(lowerCamelCase__ ): A_ : Dict = torch.load(lowerCamelCase__ )["""model"""] remove_ignore_keys_(lowerCamelCase__ ) A_ : Any = rename_fairseq_keys(lowerCamelCase__ , lowerCamelCase__ ) A_ : List[Any] = os.path.join( lowerCamelCase__ , weights_name.replace(""".bin""" , f'-{len(lowerCamelCase__ )+1:05d}-of-???.bin' ) ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(lowerCamelCase__ )[0]].dtype ) # Add the last block A_ : List[str] = os.path.join(lowerCamelCase__ , weights_name.replace(""".bin""" , f'-{len(lowerCamelCase__ )+1:05d}-of-???.bin' ) ) A_ : Dict = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(lowerCamelCase__ ) A_ : str = rename_fairseq_keys(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(lowerCamelCase__ ) == 1: A_ : List[str] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) torch.save(lowerCamelCase__ , 
lowerCamelCase__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(lowerCamelCase__ , lowerCamelCase__ ) # Otherwise, let's build the index A_ : List[str] = {} for idx, shard in enumerate(lowerCamelCase__ ): A_ : Optional[Any] = weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-{len(lowerCamelCase__ ):05d}.bin' ) A_ : Dict = os.path.join(lowerCamelCase__ , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(lowerCamelCase__ , os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ) for key in shard: A_ : Union[str, Any] = shard_file # Add the metadata A_ : Dict = {"""total_size""": total_size} A_ : Tuple = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , """w""" , encoding="""utf-8""" ) as f: A_ : Optional[Any] = json.dumps(lowerCamelCase__ , indent=2 , sort_keys=lowerCamelCase__ ) + """\n""" f.write(lowerCamelCase__ ) return metadata, index if __name__ == "__main__": lowerCamelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) lowerCamelCase :Any = parser.parse_args() lowerCamelCase , lowerCamelCase :Dict = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) lowerCamelCase :Dict = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) lowerCamelCase :int = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''

import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Check that every custom extension file is present in the given tree.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
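# Usage sketch (the script location is an assumption; in the transformers repo
# this kind of check lives under utils/):
#
#     python utils/check_build.py              # verify build/lib/transformers
#     python utils/check_build.py --check_lib  # verify the installed package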
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = (UniPCMultistepScheduler,) __SCREAMING_SNAKE_CASE : Dict = (('num_inference_steps', 25),) def _a (self , **lowercase ): A_ : List[Any] = { """num_train_timesteps""": 1000, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, """solver_type""": """bh2""", } config.update(**lowercase ) return config def _a (self , lowercase=0 , **lowercase ): A_ : List[Any] = dict(self.forward_default_kwargs ) A_ : Optional[Any] = kwargs.pop("""num_inference_steps""" , lowercase ) A_ : Dict = self.dummy_sample A_ : Tuple = 0.1 * sample A_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A_ : List[Any] = self.get_scheduler_config(**lowercase ) A_ : List[Any] = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals A_ : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) A_ : str = scheduler_class.from_pretrained(lowercase ) new_scheduler.set_timesteps(lowercase ) # copy over dummy past residuals A_ : str = dummy_past_residuals[: new_scheduler.config.solver_order] A_, A_ : List[str] = sample, sample for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ): A_ : str = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample A_ : Tuple = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _a (self , lowercase=0 , **lowercase ): A_ : Optional[Any] = dict(self.forward_default_kwargs ) A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , lowercase ) A_ : List[Any] = self.dummy_sample A_ : Optional[int] = 0.1 * sample A_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A_ : Union[str, Any] = self.get_scheduler_config() A_ : Dict = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals (must be after setting timesteps) A_ : Any = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) A_ : Optional[int] = scheduler_class.from_pretrained(lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase ) # copy over dummy past residual (must be after setting timesteps) A_ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A_ : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample A_ : Optional[Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _a (self , lowercase=None , **lowercase ): if scheduler is None: A_ : List[str] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(**lowercase ) A_ : str = scheduler_class(**lowercase ) A_ : Optional[Any] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(**lowercase ) A_ : Optional[Any] = scheduler_class(**lowercase ) 
A_ : List[str] = 10 A_ : List[Any] = self.dummy_model() A_ : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.timesteps ): A_ : List[str] = model(lowercase , lowercase ) A_ : str = scheduler.step(lowercase , lowercase , lowercase ).prev_sample return sample def _a (self ): A_ : Optional[Any] = dict(self.forward_default_kwargs ) A_ : str = kwargs.pop("""num_inference_steps""" , lowercase ) for scheduler_class in self.scheduler_classes: A_ : Tuple = self.get_scheduler_config() A_ : Optional[Any] = scheduler_class(**lowercase ) A_ : List[str] = self.dummy_sample A_ : Union[str, Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase , """set_timesteps""" ): scheduler.set_timesteps(lowercase ) elif num_inference_steps is not None and not hasattr(lowercase , """set_timesteps""" ): A_ : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A_ : str = dummy_past_residuals[: scheduler.config.solver_order] A_ : Union[str, Any] = scheduler.timesteps[5] A_ : Dict = scheduler.timesteps[6] A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _a (self ): # make sure that iterating over schedulers with same config names gives same results # for defaults A_ : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A_ : List[str] = self.full_loop(scheduler=lowercase ) A_ : Any = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 A_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A_ : List[str] = DEISMultistepScheduler.from_config(scheduler.config ) A_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A_ : str = UniPCMultistepScheduler.from_config(scheduler.config ) A_ : int = self.full_loop(scheduler=lowercase ) A_ : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 def _a (self ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def _a (self ): self.check_over_configs(thresholding=lowercase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , solver_order=lowercase , solver_type=lowercase , ) def _a (self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase ) def _a (self ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , ) A_ : Tuple = self.full_loop( solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , ) assert not torch.isnan(lowercase ).any(), "Samples have nan numbers" def _a (self ): self.check_over_configs(lower_order_final=lowercase ) self.check_over_configs(lower_order_final=lowercase ) def _a (self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowercase , time_step=0 ) def _a (self ): A_ : Optional[Any] = 
self.full_loop() A_ : Optional[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.24_64 ) < 1E-3 def _a (self ): A_ : Optional[Any] = self.full_loop(prediction_type="""v_prediction""" ) A_ : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.10_14 ) < 1E-3 def _a (self ): A_ : Any = self.scheduler_classes[0] A_ : Union[str, Any] = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 ) A_ : str = scheduler_class(**lowercase ) A_ : Tuple = 10 A_ : List[Any] = self.dummy_model() A_ : Tuple = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.timesteps ): A_ : Optional[int] = model(lowercase , lowercase ) A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase ).prev_sample assert sample.dtype == torch.floataa def _a (self , **lowercase ): for scheduler_class in self.scheduler_classes: A_ : Tuple = self.get_scheduler_config(**lowercase ) A_ : Dict = scheduler_class(**lowercase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
'''simple docstring'''


def count_divisors(n: int) -> int:
    """Count the divisors of `n` via its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
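# Usage sketch: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors, which checks
# the factorisation-based counter above.
assert count_divisors(28) == 6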
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Tuple = 'linear' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial' __SCREAMING_SNAKE_CASE : Optional[int] = 'constant' __SCREAMING_SNAKE_CASE : str = 'constant_with_warmup' __SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant' def a ( lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' return LambdaLR(lowerCamelCase__ , lambda lowerCamelCase__ : 1 , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1.0 , lowerCamelCase__ ) ) return 1.0 return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = -1 ): '''simple docstring''' A_ : Optional[Any] = {} A_ : Optional[Any] = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: A_, A_ : Union[str, Any] = rule_str.split(""":""" ) A_ : Union[str, Any] = int(lowerCamelCase__ ) A_ : List[Any] = float(lowerCamelCase__ ) A_ : Union[str, Any] = value A_ : Optional[int] = float(rule_list[-1] ) def create_rules_function(lowerCamelCase__ , lowerCamelCase__ ): def rule_func(lowerCamelCase__ ) -> float: A_ : str = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowerCamelCase__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A_ : str = create_rules_function(lowerCamelCase__ , lowerCamelCase__ ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , last_epoch=lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=-1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.5 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 ): '''simple docstring''' def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) A_ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) 
% 1.0) )) ) return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-7 , lowerCamelCase__=1.0 , lowerCamelCase__=-1 ): '''simple docstring''' A_ : Optional[Any] = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' ) def lr_lambda(lowerCamelCase__ ): if current_step < num_warmup_steps: return float(lowerCamelCase__ ) / float(max(1 , lowerCamelCase__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A_ : str = lr_init - lr_end A_ : Tuple = num_training_steps - num_warmup_steps A_ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps A_ : Optional[int] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase :List[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = -1 , ): '''simple docstring''' A_ : Optional[Any] = SchedulerType(lowerCamelCase__ ) A_ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowerCamelCase__ , last_epoch=lowerCamelCase__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowerCamelCase__ , step_rules=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , num_cycles=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , power=lowerCamelCase__ , last_epoch=lowerCamelCase__ , ) return schedule_func( lowerCamelCase__ , num_warmup_steps=lowerCamelCase__ , num_training_steps=lowerCamelCase__ , last_epoch=lowerCamelCase__ )
686
1
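All of the scheduler factories above reduce to a LambdaLR multiplier applied to the base learning rate. As a minimal dependency-free sketch (function and variable names here are illustrative, not from the source), the linear warmup-then-decay rule can be checked directly:

# Minimal sketch of the linear warmup/decay multiplier built above.
def linear_multiplier(step, num_warmup_steps, num_training_steps):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(
        0.0,
        (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps),
    )

for step in (0, 50, 100, 550, 1000):
    print(step, round(linear_multiplier(step, 100, 1000), 3))
# 0 0.0 / 50 0.5 / 100 1.0 / 550 0.5 / 1000 0.0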
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase :Optional[Any] = 1_6 lowerCamelCase :Dict = 3_2 def a ( lowerCamelCase__ , lowerCamelCase__ = 16 ): '''simple docstring''' A_ : Dict = AutoTokenizer.from_pretrained("""bert-base-cased""" ) A_ : str = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) A_ : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Optional[Any] = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : Tuple = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : Union[str, Any] = 16 elif accelerator.mixed_precision != "no": A_ : List[Any] = 8 else: A_ : Optional[Any] = None return tokenizer.pad( lowerCamelCase__ , padding="""longest""" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : Union[str, Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : List[str] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase :Dict = mocked_dataloaders # noqa: F811 def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase__ ) == "1": A_ : Dict = 2 # New Code # A_ : Tuple = int(args.gradient_accumulation_steps ) A_ : List[Any] = int(args.local_sgd_steps ) # Initialize accelerator A_ : List[str] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCamelCase__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Dict = config["""lr"""] A_ : Dict = int(config["""num_epochs"""] ) A_ : Union[str, Any] = int(config["""seed"""] ) A_ : List[str] = int(config["""batch_size"""] ) A_ : List[str] = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowerCamelCase__ ) A_, A_ : Optional[int] = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : List[Any] = model.to(accelerator.device ) # Instantiate optimizer A_ : List[Any] = AdamW(params=model.parameters() , lr=lowerCamelCase__ ) # Instantiate scheduler A_ : str = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_, A_, A_, A_, A_ : Any = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ ): model.train() with LocalSGD( accelerator=lowerCamelCase__ , model=lowerCamelCase__ , local_sgd_steps=lowerCamelCase__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowerCamelCase__ ): A_ : int = model(**lowerCamelCase__ ) A_ : Any = output.loss accelerator.backward(lowerCamelCase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ : Any = model(**lowerCamelCase__ ) A_ : Optional[Any] = outputs.logits.argmax(dim=-1 ) A_, A_ : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCamelCase__ , references=lowerCamelCase__ , ) A_ : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCamelCase__ ) def a ( ): '''simple docstring''' A_ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowerCamelCase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=lowerCamelCase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) A_ : str = parser.parse_args() A_ : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
686
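Gradient accumulation and LocalSGD compose in this script: each optimizer step sees gradient_accumulation_steps micro-batches, and LocalSGD additionally averages parameters across workers every local_sgd_steps steps. A quick back-of-envelope check of the effective batch size (the numbers are illustrative, not from the source):

per_device_batch = 16   # batch_size from the config above
grad_accum_steps = 4    # --gradient_accumulation_steps
num_workers = 8         # processes launched by accelerate
effective_batch = per_device_batch * grad_accum_steps * num_workers
print(effective_batch)  # 512 samples contribute to each optimizer step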
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''') lowerCamelCase :int = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } lowerCamelCase :List[str] = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } lowerCamelCase :Union[str, Any] = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } lowerCamelCase :Dict = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } lowerCamelCase :int = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } lowerCamelCase :str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } lowerCamelCase :List[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } lowerCamelCase :Tuple = [] lowerCamelCase :Dict = [] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for attribute in key.split(""".""" ): A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Any = 
hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Dict = value elif weight_type == "bias": A_ : Dict = value elif weight_type == "running_mean": A_ : Optional[Any] = value elif weight_type == "running_var": A_ : int = value elif weight_type == "num_batches_tracked": A_ : Optional[Any] = value elif weight_type == "weight_ih_l0": A_ : Optional[int] = value elif weight_type == "weight_hh_l0": A_ : Union[str, Any] = value elif weight_type == "bias_ih_l0": A_ : Optional[int] = value elif weight_type == "bias_hh_l0": A_ : Tuple = value elif weight_type == "weight_ih_l1": A_ : Optional[int] = value elif weight_type == "weight_hh_l1": A_ : Dict = value elif weight_type == "bias_ih_l1": A_ : Optional[int] = value elif weight_type == "bias_hh_l1": A_ : Tuple = value else: A_ : Any = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_, A_ : List[str] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": A_ : List[str] = MAPPING_24K elif model_name == "encodec_48khz": A_ : str = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f'{name} was ignored' ) continue A_ : str = False for key, mapped_key in MAPPING.items(): if "*" in key: A_, A_ : List[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: A_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue A_ : Union[str, Any] = True if "*" in mapped_key: A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: A_ : Any = """weight_g""" elif "weight_v" in name: A_ : Tuple = """weight_v""" elif "weight_ih_l0" in name: A_ : Union[str, Any] = """weight_ih_l0""" elif "weight_hh_l0" in name: A_ : Tuple = """weight_hh_l0""" elif "bias_ih_l0" in name: A_ : str = """bias_ih_l0""" elif "bias_hh_l0" in name: A_ : List[Any] = """bias_hh_l0""" elif "weight_ih_l1" in name: A_ : Dict = """weight_ih_l1""" elif "weight_hh_l1" in name: A_ : Any = """weight_hh_l1""" elif "bias_ih_l1" in name: A_ : Optional[int] = """bias_ih_l1""" elif "bias_hh_l1" in name: A_ : List[Any] = """bias_hh_l1""" elif "bias" in name: A_ : List[str] = """bias""" elif "weight" in name: A_ : Optional[int] = """weight""" elif "running_mean" in name: A_ : Union[str, Any] = """running_mean""" elif "running_var" in name: A_ : Optional[int] = """running_var""" elif "num_batches_tracked" in name: A_ : List[Any] = """num_batches_tracked""" else: A_ : str = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue 
if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): '''simple docstring''' if config_path is not None: A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Optional[int] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A_ : Dict = [8, 5, 4, 4] A_ : Optional[Any] = [2.2] A_ : Tuple = 64 A_ : Tuple = 3_20_00 A_ : List[Any] = 20_48 A_ : Optional[Any] = False A_ : str = False A_ : Optional[int] = False elif model_name == "encodec_48khz": A_ : Dict = [8, 5, 4, 2] A_ : Tuple = [3.0, 6.0, 12.0, 24.0] A_ : List[Any] = 4_80_00 A_ : Dict = 2 A_ : Dict = False A_ : Dict = """time_group_norm""" A_ : Optional[Any] = True A_ : str = 1.0 A_ : Any = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : Dict = EncodecModel(lowerCamelCase__ ) A_ : Any = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase__ ) A_ : int = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A_ : Tuple = original_checkpoint["""best_state"""] recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCamelCase :Dict = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
686
1
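The mapping tables above use '*' as a placeholder for a layer index that must be carried from the original key into the converted key. A simplified, self-contained sketch of that renaming follows; the real conversion script additionally tracks weight types such as weight_g/weight_v, so this is illustrative only:

def rename_key(name, mapping):
    # '*' stands for a layer index shared by the old and new key.
    for key, mapped in mapping.items():
        if "*" in key:
            prefix, suffix = key.split(".*.")
            if name.startswith(prefix) and name.endswith(suffix):
                index = name[len(prefix) + 1 : -(len(suffix) + 1)]
                return mapped.replace("*", index)
        elif name.startswith(key):
            return name.replace(key, mapped)
    return name

print(rename_key(
    "quantizer.vq.layers.3._codebook.embed",
    {"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed"},
))  # quantizer.layers.3.codebook.embed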
'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCamelCase :int = TypeVar('''KT''') lowerCamelCase :Union[str, Any] = TypeVar('''VT''') class _lowerCAmelCase ( Generic[KT, VT] ): def __init__(self , lowercase = "root" , lowercase = None ): A_ : Union[str, Any] = key A_ : int = value A_ : list[Node[KT, VT]] = [] def __repr__(self ): return F'Node({self.key}: {self.value})' @property def _a (self ): return len(self.forward ) class _lowerCAmelCase ( Generic[KT, VT] ): def __init__(self , lowercase = 0.5 , lowercase = 16 ): A_ : Node[KT, VT] = Node[KT, VT]() A_ : Tuple = 0 A_ : Tuple = p A_ : int = max_level def __str__(self ): A_ : Union[str, Any] = list(self ) if len(lowercase ) == 0: return F'SkipList(level={self.level})' A_ : Dict = max((len(str(lowercase ) ) for item in items) , default=4 ) A_ : str = max(lowercase , 4 ) + 4 A_ : Optional[int] = self.head A_ : Dict = [] A_ : Optional[Any] = node.forward.copy() lines.append(F'[{node.key}]'.ljust(lowercase , """-""" ) + """* """ * len(lowercase ) ) lines.append(""" """ * label_size + """| """ * len(lowercase ) ) while len(node.forward ) != 0: A_ : Optional[Any] = node.forward[0] lines.append( F'[{node.key}]'.ljust(lowercase , """-""" ) + """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) ) lines.append(""" """ * label_size + """| """ * len(lowercase ) ) A_ : Dict = node.forward lines.append("""None""".ljust(lowercase ) + """* """ * len(lowercase ) ) return F'SkipList(level={self.level})\n' + "\n".join(lowercase ) def __iter__(self ): A_ : Optional[int] = self.head while len(node.forward ) != 0: yield node.forward[0].key A_ : str = node.forward[0] def _a (self ): A_ : Dict = 1 while random() < self.p and level < self.max_level: level += 1 return level def _a (self , lowercase ): A_ : Union[str, Any] = [] A_ : List[Any] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: A_ : List[Any] = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(lowercase ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _a (self , lowercase ): A_, A_ : List[Any] = self._locate_node(lowercase ) if node is not None: for i, update_node in enumerate(lowercase ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: A_ : List[Any] = node.forward[i] else: A_ : List[str] = update_node.forward[:i] def _a (self , lowercase , lowercase ): A_, A_ : Union[str, Any] = self._locate_node(lowercase ) if node is not None: A_ : Any = value else: A_ : List[str] = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. 
for _ in range(self.level - 1 , lowercase ): update_vector.append(self.head ) A_ : List[str] = level A_ : List[str] = Node(lowercase , lowercase ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(lowercase ) else: A_ : str = new_node def _a (self , lowercase ): A_, A_ : List[str] = self._locate_node(lowercase ) if node is not None: return node.value return None def a ( ): '''simple docstring''' A_ : int = SkipList() skip_list.insert("""Key1""" , 3 ) skip_list.insert("""Key2""" , 12 ) skip_list.insert("""Key3""" , 41 ) skip_list.insert("""Key4""" , -19 ) A_ : List[Any] = skip_list.head A_ : List[str] = {} while node.level != 0: A_ : Tuple = node.forward[0] A_ : List[Any] = node.value assert len(lowerCamelCase__ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def a ( ): '''simple docstring''' A_ : int = SkipList() skip_list.insert("""Key1""" , 10 ) skip_list.insert("""Key1""" , 12 ) skip_list.insert("""Key5""" , 7 ) skip_list.insert("""Key7""" , 10 ) skip_list.insert("""Key10""" , 5 ) skip_list.insert("""Key7""" , 7 ) skip_list.insert("""Key5""" , 5 ) skip_list.insert("""Key10""" , 10 ) A_ : Dict = skip_list.head A_ : Tuple = {} while node.level != 0: A_ : Tuple = node.forward[0] A_ : Optional[Any] = node.value if len(lowerCamelCase__ ) != 4: print() assert len(lowerCamelCase__ ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def a ( ): '''simple docstring''' A_ : Any = SkipList() assert skip_list.find("""Some key""" ) is None def a ( ): '''simple docstring''' A_ : Tuple = SkipList() skip_list.insert("""Key2""" , 20 ) assert skip_list.find("""Key2""" ) == 20 skip_list.insert("""Some Key""" , 10 ) skip_list.insert("""Key2""" , 8 ) skip_list.insert("""V""" , 13 ) assert skip_list.find("""Y""" ) is None assert skip_list.find("""Key2""" ) == 8 assert skip_list.find("""Some Key""" ) == 10 assert skip_list.find("""V""" ) == 13 def a ( ): '''simple docstring''' A_ : Optional[int] = SkipList() skip_list.delete("""Some key""" ) assert len(skip_list.head.forward ) == 0 def a ( ): '''simple docstring''' A_ : List[Any] = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""Key2""" ) is None def a ( ): '''simple docstring''' A_ : List[str] = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) == 14 assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""X""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key1""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) 
is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) is None def a ( ): '''simple docstring''' A_ : Any = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 1_42 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""X""" ) def traverse_keys(lowerCamelCase__ ): yield node.key for forward_node in node.forward: yield from traverse_keys(lowerCamelCase__ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def a ( ): '''simple docstring''' def is_sorted(lowerCamelCase__ ): return all(next_item >= item for item, next_item in zip(lowerCamelCase__ , lst[1:] ) ) A_ : List[str] = SkipList() for i in range(10 ): skip_list.insert(lowerCamelCase__ , lowerCamelCase__ ) assert is_sorted(list(lowerCamelCase__ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(lowerCamelCase__ ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(lowerCamelCase__ ) ) def a ( ): '''simple docstring''' for _ in range(1_00 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def a ( ): '''simple docstring''' A_ : List[Any] = SkipList() skip_list.insert(2 , """2""" ) skip_list.insert(4 , """4""" ) skip_list.insert(6 , """4""" ) skip_list.insert(4 , """5""" ) skip_list.insert(8 , """4""" ) skip_list.insert(9 , """4""" ) skip_list.delete(4 ) print(lowerCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
686
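Why the skip list is fast: with p = 0.5 a node is promoted to level k with probability 0.5**k, so an n-node list has about log2(n) levels in expectation and search visits O(log n) nodes. A quick sanity check of that expected level count:

import math

# Expected number of levels for an n-node skip list with p = 0.5.
for n in (16, 1024, 1_000_000):
    print(n, round(math.log2(n), 1))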
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
686
1
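A short usage sketch for the config above (assumes a transformers install; every overridable field follows the constructor signature):

from transformers import BeitConfig

config = BeitConfig(image_size=384, patch_size=16)
print(config.image_size, config.num_hidden_layers)  # 384 12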
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__(self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p A_ : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} A_ : Union[str, Any] = parent A_ : Optional[int] = batch_size A_ : Optional[int] = num_channels A_ : int = min_resolution A_ : Optional[Any] = max_resolution A_ : List[Any] = do_resize A_ : List[Any] = size A_ : Any = do_normalize A_ : Union[str, Any] = image_mean A_ : Union[str, Any] = image_std A_ : Dict = do_rescale A_ : List[str] = rescale_factor A_ : Any = do_pad def _a (self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _a (self , lowercase , lowercase=False ): if not batched: A_ : int = image_inputs[0] if isinstance(lowercase , Image.Image ): A_, A_ : int = image.size else: A_, A_ : List[Any] = image.shape[1], image.shape[2] if w < h: A_ : Any = int(self.size["""shortest_edge"""] * h / w ) A_ : int = self.size["""shortest_edge"""] elif w > h: A_ : List[Any] = self.size["""shortest_edge"""] A_ : Optional[int] = int(self.size["""shortest_edge"""] * w / h ) else: A_ : List[str] = self.size["""shortest_edge"""] A_ : Union[str, Any] = self.size["""shortest_edge"""] else: A_ : Optional[int] = [] for image in image_inputs: A_, A_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : int = max(lowercase , key=lambda lowercase : item[0] )[0] A_ : int = max(lowercase , key=lambda lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None def _a (self ): A_ : str = ConditionalDetrImageProcessingTester(self ) @property def _a (self ): return self.image_processor_tester.prepare_image_processor_dict() def _a (self ): A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , """image_mean""" ) ) self.assertTrue(hasattr(lowercase , """image_std""" ) ) self.assertTrue(hasattr(lowercase , """do_normalize""" ) ) self.assertTrue(hasattr(lowercase , """do_resize""" ) ) self.assertTrue(hasattr(lowercase , """size""" ) ) def _a (self ): A_ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , lowercase ) A_ : Dict = self.image_processing_class.from_dict( self.image_processor_dict , 
size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , lowercase ) def _a (self ): pass def _a (self ): # Initialize image_processing A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_, A_ : str = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_, A_ : Tuple = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) A_ : Optional[int] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a (self ): # Initialize image_processing A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , np.ndarray ) # Test not batched input A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_, A_ : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Union[str, Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values A_, A_ : int = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a (self ): # Initialize image_processing A_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_, A_ : str = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Optional[Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values A_, A_ : Any = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _a (self ): # prepare image and target A_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: A_ : Dict = json.loads(f.read() ) A_ : Any = 
{"""image_id""": 39769, """annotations""": target} # encode them A_ : str = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" ) A_ : Any = image_processing(images=lowercase , annotations=lowercase , return_tensors="""pt""" ) # verify pixel values A_ : List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , lowercase ) A_ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase , atol=1E-4 ) ) # verify area A_ : Optional[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase ) ) # verify boxes A_ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase ) A_ : Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase , atol=1E-3 ) ) # verify image_id A_ : Union[str, Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase ) ) # verify is_crowd A_ : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase ) ) # verify class_labels A_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase ) ) # verify orig_size A_ : Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase ) ) # verify size A_ : Optional[int] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase ) ) @slow def _a (self ): # prepare image, target and masks_path A_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: A_ : List[Any] = json.loads(f.read() ) A_ : int = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} A_ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them A_ : Dict = ConditionalDetrImageProcessor(format="""coco_panoptic""" ) A_ : Dict = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="""pt""" ) # verify pixel values A_ : str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , lowercase ) A_ : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase , atol=1E-4 ) ) # verify area A_ : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase ) ) # verify boxes A_ : List[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase ) A_ : str = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase , atol=1E-3 ) ) # verify image_id A_ : Optional[int] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase ) ) # verify is_crowd A_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) 
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase ) ) # verify class_labels A_ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase ) ) # verify masks A_ : Union[str, Any] = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase ) # verify orig_size A_ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase ) ) # verify size A_ : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase ) )
686
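The tester's get_expected_values mirrors shortest-edge resizing: scale the image so its shorter side equals size["shortest_edge"] while preserving aspect ratio. A pure-Python sketch of that rule (it ignores the longest_edge cap, so it is illustrative only):

def resize_shortest_edge(height, width, shortest=18):
    if width < height:
        return int(shortest * height / width), shortest
    if width > height:
        return shortest, int(shortest * width / height)
    return shortest, shortest

print(resize_shortest_edge(480, 640))  # (18, 24)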
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel lowerCamelCase :Optional[int] = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 4_8_0_0_0, '''sample_size''': 1_3_1_0_7_2, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 1_6_0_0_0, '''sample_size''': 6_5_5_3_6, }, } def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2 def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2 A_ : List[Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ ) class _lowerCAmelCase ( __UpperCAmelCase ): pass class _lowerCAmelCase ( nn.Module ): def __init__(self , lowercase ): super().__init__() A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 ) A_ : str = deepcopy(self.diffusion ) A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = MODELS_MAP[model_name]["""url"""] os.system(f'wget {url} ./' ) return f'./{model_name}.ckpt' lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } lowerCamelCase :str = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } lowerCamelCase :str = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } lowerCamelCase :int = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } lowerCamelCase :List[Any] = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } lowerCamelCase :Optional[Any] = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def a ( lowerCamelCase__ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""" , 
RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'ResConvBlock error with {name}' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def a ( lowerCamelCase__ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ): return name.replace(lowerCamelCase__ , lowerCamelCase__ ) elif name.startswith(lowerCamelCase__ ): return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value] raise ValueError(f'Attn error with {name}' ) def a ( lowerCamelCase__ , lowerCamelCase__=13 ): '''simple docstring''' A_ : Union[str, Any] = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) A_ : Dict = 0 if string.startswith("""net.3.""" ): depth += 1 A_ : int = string[6:] elif string.startswith("""net.""" ): A_ : Tuple = string[4:] while string.startswith("""main.7.""" ): depth += 1 A_ : Dict = string[7:] if string.startswith("""main.""" ): A_ : Union[str, Any] = string[5:] # mid block if string[:2].isdigit(): A_ : Optional[Any] = string[:2] A_ : Optional[Any] = string[2:] else: A_ : List[Any] = string[0] A_ : Dict = string[1:] if depth == max_depth: A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num] A_ : Optional[Any] = """mid_block""" elif depth > 0 and int(lowerCamelCase__ ) < 7: A_ : Any = DOWN_NUM_TO_LAYER[layer_num] A_ : Union[str, Any] = f'down_blocks.{depth}' elif depth > 0 and int(lowerCamelCase__ ) > 7: A_ : List[str] = UP_NUM_TO_LAYER[layer_num] A_ : List[str] = f'up_blocks.{max_depth - depth - 1}' elif depth == 0: A_ : str = DEPTH_0_TO_LAYER[layer_num] A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' 
) A_ : Optional[int] = string_left[1:] if "resnets" in new_layer: A_ : Tuple = convert_resconv_naming(lowerCamelCase__ ) elif "attentions" in new_layer: A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ ) A_ : Dict = new_string_left if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left else: A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue A_ : List[Any] = rename(lowerCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: A_ : int = v return new_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if len(lowerCamelCase__ ) == 1: if len(v.shape ) == 3: # weight A_ : Optional[Any] = v[:, :, 0] else: # bias A_ : Union[str, Any] = v else: # qkv matrices A_ : Optional[int] = v.shape[0] A_ : str = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0] else: A_ : str = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}' A_ : int = download(lowerCamelCase__ ) A_ : Any = MODELS_MAP[model_name]["""sample_rate"""] A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""] A_ : Tuple = Object() A_ : Union[str, Any] = sample_size A_ : Tuple = sample_rate A_ : int = 0 A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ ) A_ : Optional[Any] = diffusers_model.state_dict() A_ : Dict = DiffusionUncond(lowerCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] ) A_ : Any = orig_model.diffusion_ema.eval() A_ : Any = orig_model.state_dict() A_ : List[str] = rename_orig_weights(lowerCamelCase__ ) A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}' assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}' if key == "time_proj.weight": A_ : str = value.squeeze() A_ : Union[str, Any] = value diffusers_model.load_state_dict(lowerCamelCase__ ) A_ : Optional[Any] = 1_00 A_ : Union[str, Any] = 33 A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ ) A_ : List[str] = torch.manual_seed(lowerCamelCase__ ) A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ ) A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1] A_ : List[Any] = get_crash_schedule(lowerCamelCase__ ) A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) A_ : str = torch.manual_seed(33 ) A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} ) A_ : str = generated.clamp(-1 , 1 ) A_ : List[Any] = (generated - audio).abs().sum() A_ : int = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , lowerCamelCase__ ) print("""Diff max""" , lowerCamelCase__ ) assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/' print(f'Conversion for {model_name} successful!' ) if __name__ == "__main__": lowerCamelCase :int = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCamelCase :List[str] = parser.parse_args() main(args)
686
1
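The crash schedule maps a uniform t through sin/cos space and back via atan2 (reading the garbled torch.atana above as torch.atan2). A dependency-free sketch of get_crash_schedule for a single step:

import math

def alpha_sigma_to_t(alpha, sigma):
    return math.atan2(sigma, alpha) / math.pi * 2

def crash_schedule(t):
    sigma = math.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)

print(round(crash_schedule(0.5), 4))  # 0.3333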
'''simple docstring'''
from collections import deque


class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase , lowercase ):
        A_ : Dict = process_name  # process name
        A_ : Optional[int] = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        A_ : Tuple = arrival_time
        A_ : Optional[Any] = burst_time  # remaining burst time
        A_ : Optional[Any] = 0  # total time of the process wait in ready queue
        A_ : Any = 0  # time from arrival time to completion time


class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase , lowercase , lowercase , ):
        # total number of mlfq's queues
        A_ : Union[str, Any] = number_of_queues
        # time slice of queues that round robin algorithm applied
        A_ : Tuple = time_slices
        # unfinished process is in this ready_queue
        A_ : Union[str, Any] = queue
        # current time
        A_ : Any = current_time
        # finished process is in this sequence queue
        A_ : deque[Process] = deque()

    def _a (self ):
        A_ : Tuple = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence

    def _a (self , lowercase ):
        A_ : Union[str, Any] = []
        for i in range(len(lowercase ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times

    def _a (self , lowercase ):
        A_ : str = []
        for i in range(len(lowercase ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times

    def _a (self , lowercase ):
        A_ : Any = []
        for i in range(len(lowercase ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times

    def _a (self , lowercase ):
        return [q.burst_time for q in queue]

    def _a (self , lowercase ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def _a (self , lowercase ):
        A_ : deque[Process] = deque()  # sequence deque of finished process
        while len(lowercase ) != 0:
            A_ : Optional[Any] = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(lowercase )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            A_ : int = 0
            # set the process's turnaround time because it is finished
            A_ : str = self.current_time - cp.arrival_time
            # set the completion time
            A_ : Union[str, Any] = self.current_time
            # add the process to queue that has finished queue
            finished.append(lowercase )
        self.finish_queue.extend(lowercase )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def _a (self , lowercase , lowercase ):
        A_ : deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(lowercase ) ):
            A_ : List[Any] = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(lowercase )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                A_ : str = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(lowercase )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                A_ : int = 0
                # set the finish time
                A_ : Tuple = self.current_time
                # update the process' turnaround time because it is finished
                A_ : Dict = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(lowercase )
        self.finish_queue.extend(lowercase )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def _a (self ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            A_, A_ : List[str] = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    lowerCamelCase :Tuple = Process('''P1''', 0, 5_3)
    lowerCamelCase :int = Process('''P2''', 0, 1_7)
    lowerCamelCase :Optional[int] = Process('''P3''', 0, 6_8)
    lowerCamelCase :Union[str, Any] = Process('''P4''', 0, 2_4)
    lowerCamelCase :Any = 3
    lowerCamelCase :Optional[int] = [1_7, 2_5]
    lowerCamelCase :Optional[int] = deque([Pa, Pa, Pa, Pa])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})

    lowerCamelCase :int = Process('''P1''', 0, 5_3)
    lowerCamelCase :str = Process('''P2''', 0, 1_7)
    lowerCamelCase :str = Process('''P3''', 0, 6_8)
    lowerCamelCase :List[str] = Process('''P4''', 0, 2_4)
    lowerCamelCase :Optional[Any] = 3
    lowerCamelCase :Union[str, Any] = [1_7, 2_5]
    lowerCamelCase :int = deque([Pa, Pa, Pa, Pa])
    lowerCamelCase :Dict = MLFQ(number_of_queues, time_slices, queue, 0)
    lowerCamelCase :Optional[Any] = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(F"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(F"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(F"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print sequence of finished processes
    print(F"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
686
'''simple docstring'''
from math import factorial


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""" )
    A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
    coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trials''')
    print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.75))
686
1
'''simple docstring'''
import unittest

from knapsack import greedy_knapsack as kp


class _lowerCAmelCase ( unittest.TestCase ):
    def _a (self ):
        A_ : Union[str, Any] = [10, 20, 30, 40, 50, 60]
        A_ : Union[str, Any] = [2, 4, 6, 8, 10, 12]
        A_ : Any = 100
        self.assertEqual(kp.calc_profit(lowercase , lowercase , lowercase ) , 210 )

    def _a (self ):
        self.assertRaisesRegex(lowercase , """max_weight must greater than zero.""" )

    def _a (self ):
        self.assertRaisesRegex(lowercase , """Weight can not be negative.""" )

    def _a (self ):
        self.assertRaisesRegex(lowercase , """Profit can not be negative.""" )

    def _a (self ):
        self.assertRaisesRegex(lowercase , """max_weight must greater than zero.""" )

    def _a (self ):
        self.assertRaisesRegex(
            lowercase , """The length of profit and weight must be same.""" )


if __name__ == "__main__":
    unittest.main()
686
'''simple docstring'''
import re


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
        raise ValueError("""Invalid Strand""" )
    return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
686
1
'''simple docstring'''
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase=13 , lowercase=10 , lowercase=3 , lowercase=2 , lowercase=2 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase="divided_space_time" , lowercase=None , ):
        A_ : Tuple = parent
        A_ : Optional[int] = batch_size
        A_ : Optional[int] = image_size
        A_ : str = num_channels
        A_ : Any = patch_size
        A_ : Union[str, Any] = num_frames
        A_ : Dict = is_training
        A_ : Dict = use_labels
        A_ : List[str] = hidden_size
        A_ : int = num_hidden_layers
        A_ : List[Any] = num_attention_heads
        A_ : int = intermediate_size
        A_ : Any = hidden_act
        A_ : int = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : List[str] = attention_type
        A_ : str = initializer_range
        A_ : List[Any] = scope
        A_ : Dict = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        A_ : List[Any] = (image_size // patch_size) ** 2
        A_ : Any = (num_frames) * self.num_patches_per_frame + 1

    def _a (self ):
        A_ : Union[str, Any] = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        A_ : str = None
        if self.use_labels:
            A_ : int = ids_tensor([self.batch_size] , self.num_labels )

        A_ : int = self.get_config()

        return config, pixel_values, labels

    def _a (self ):
        A_ : List[str] = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        A_ : List[str] = self.num_labels
        return config

    def _a (self , lowercase , lowercase , lowercase ):
        A_ : List[str] = TimesformerModel(config=lowercase )
        model.to(lowercase )
        model.eval()
        A_ : Optional[int] = model(lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a (self , lowercase , lowercase , lowercase ):
        A_ : Optional[int] = TimesformerForVideoClassification(lowercase )
        model.to(lowercase )
        model.eval()

        A_ : Optional[int] = model(lowercase )

        # verify the logits shape
        A_ : int = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , lowercase )

    def _a (self ):
        A_ : Tuple = self.prepare_config_and_inputs()
        A_, A_, A_ : str = config_and_inputs
        A_ : str = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE : Dict = (
        {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    __SCREAMING_SNAKE_CASE : Tuple = False
    __SCREAMING_SNAKE_CASE : List[Any] = False
    __SCREAMING_SNAKE_CASE : List[str] = False
    __SCREAMING_SNAKE_CASE : int = False

    def _a (self ):
        A_ : int = TimesformerModelTester(self )
        A_ : Dict = ConfigTester(
            self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )

    def _a (self , lowercase , lowercase , lowercase=False ):
        A_ : Any = copy.deepcopy(lowercase )

        if return_labels:
            if model_class in get_values(lowercase ):
                A_ : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase )

        return inputs_dict

    def _a (self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def _a (self ):
        pass

    def _a (self ):
        A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A_ : Any = model_class(lowercase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            A_ : Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )

    def _a (self ):
        A_, A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A_ : Any = model_class(lowercase )
            A_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_ : Optional[Any] = [*signature.parameters.keys()]

            A_ : Any = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase )

    def _a (self ):
        A_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )

    def _a (self ):
        A_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*lowercase )

    @slow
    def _a (self ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : Tuple = TimesformerModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )

    def _a (self ):
        if not self.has_attentions:
            pass
        else:
            A_, A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
            A_ : Dict = True

            for model_class in self.all_model_classes:
                A_ : List[Any] = self.model_tester.seq_length
                A_ : Union[str, Any] = self.model_tester.num_frames

                A_ : Any = True
                A_ : int = False
                A_ : List[str] = True
                A_ : Any = model_class(lowercase )
                model.to(lowercase )
                model.eval()
                with torch.no_grad():
                    A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) )
                A_ : int = outputs.attentions
                self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                A_ : Optional[int] = True
                A_ : Optional[Any] = model_class(lowercase )
                model.to(lowercase )
                model.eval()
                with torch.no_grad():
                    A_ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
                A_ : Dict = outputs.attentions
                self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) ,
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                A_ : Optional[int] = len(lowercase )

                # Check attention is always last and order is fine
                A_ : str = True
                A_ : Optional[int] = True
                A_ : List[Any] = model_class(lowercase )
                model.to(lowercase )
                model.eval()
                with torch.no_grad():
                    A_ : List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
                self.assertEqual(out_len + 1 , len(lowercase ) )

                A_ : Optional[Any] = outputs.attentions
                self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) ,
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )

    def _a (self ):
        def check_hidden_states_output(lowercase , lowercase , lowercase ):
            A_ : List[str] = model_class(lowercase )
            model.to(lowercase )
            model.eval()

            with torch.no_grad():
                A_ : Union[str, Any] = model(**self._prepare_for_class(lowercase , lowercase ) )

            A_ : Union[str, Any] = outputs.hidden_states
            A_ : Optional[Any] = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(lowercase ) , lowercase )

            A_ : Any = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,
                [seq_length, self.model_tester.hidden_size] , )

        A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A_ : List[Any] = True
            check_hidden_states_output(lowercase , lowercase , lowercase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A_ : Dict = True

            check_hidden_states_output(lowercase , lowercase , lowercase )


def a ( ):
    '''simple docstring'''
    A_ : List[str] = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    A_ : List[str] = np.load(lowerCamelCase__ )
    return list(lowerCamelCase__ )


@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    @cached_property
    def _a (self ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    @slow
    def _a (self ):
        A_ : Union[str, Any] = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            lowercase )

        A_ : List[Any] = self.default_image_processor
        A_ : Union[str, Any] = prepare_video()
        A_ : List[Any] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase )

        # forward pass
        with torch.no_grad():
            A_ : Tuple = model(**lowercase )

        # verify the logits
        A_ : Union[str, Any] = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , lowercase )

        A_ : str = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
686
'''simple docstring'''
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def a ( ):
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(lowerCamelCase__ ):
            requests.request("""GET""" , """https://huggingface.co""" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )


@pytest.mark.integration
def a ( ):
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("""GET""" , """https://huggingface.co""" )


def a ( ):
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(lowerCamelCase__ ):
            http_head("""https://huggingface.co""" )
686
1
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


lowerCamelCase :int = logging.get_logger(__name__)

lowerCamelCase :Optional[int] = {'''vocab_file''': '''vocab.txt'''}

lowerCamelCase :Dict = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}

lowerCamelCase :Tuple = {
    '''YituTech/conv-bert-base''': 5_1_2,
    '''YituTech/conv-bert-medium-small''': 5_1_2,
    '''YituTech/conv-bert-small''': 5_1_2,
}

lowerCamelCase :List[str] = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_INIT_CONFIGURATION
    __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE : Any = ConvBertTokenizer

    def __init__(self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ):
        super().__init__(
            lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )

        A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
        ):
            A_ : int = getattr(lowercase , normalizer_state.pop("""type""" ) )
            A_ : Any = do_lower_case
            A_ : Any = strip_accents
            A_ : str = tokenize_chinese_chars
            A_ : Dict = normalizer_class(**lowercase )

        A_ : Dict = do_lower_case

    def _a (self , lowercase , lowercase=None ):
        A_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def _a (self , lowercase , lowercase = None ):
        A_ : List[str] = [self.sep_token_id]
        A_ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _a (self , lowercase , lowercase = None ):
        A_ : Any = self._tokenizer.model.save(lowercase , name=lowercase )
        return tuple(lowercase )
686
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


lowerCamelCase :Any = re.compile(R'''\s+''')


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""] )
        return True
    else:
        return False


def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
    '''simple docstring'''
    A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    A_ : Optional[int] = example["""content"""].splitlines()
    for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
    '''simple docstring'''
    A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
    A_ : List[str] = example["""content"""].splitlines()
    A_ : str = 0
    A_ : Union[str, Any] = 0

    # first test
    for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}

    # second test
    A_ : List[Any] = example["""content"""].count("""\n""" )
    A_ : Any = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("""config""" )
        count_test += line.lower().count("""test""" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : List[Any] = ["""def """, """class """, """for """, """while """]
    A_ : Optional[int] = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
    '''simple docstring'''
    A_ : Tuple = example["""content"""].splitlines()
    A_ : int = 0
    for line in lines:
        counter += line.lower().count("""=""" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
    A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
    return {"ratio": ratio}


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Any = {}
    results.update(get_hash(lowerCamelCase__ ) )
    results.update(line_stats(lowerCamelCase__ ) )
    results.update(alpha_stats(lowerCamelCase__ ) )
    results.update(char_token_ratio(lowerCamelCase__ ) )
    results.update(is_autogenerated(lowerCamelCase__ ) )
    results.update(is_config_or_test(lowerCamelCase__ ) )
    results.update(has_no_keywords(lowerCamelCase__ ) )
    results.update(has_few_assignments(lowerCamelCase__ ) )
    return results


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    with open(lowerCamelCase__ , """rb""" ) as f_in:
        with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
    os.unlink(lowerCamelCase__ )


# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
    lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    lowerCamelCase :List[str] = time.time()
    lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(F"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)

lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)

lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
    lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}")
686
1
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class _lowerCAmelCase :
    __SCREAMING_SNAKE_CASE : Tuple = BlenderbotConfig
    __SCREAMING_SNAKE_CASE : int = {}
    __SCREAMING_SNAKE_CASE : Tuple = 'gelu'

    def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ):
        A_ : Dict = parent
        A_ : Tuple = batch_size
        A_ : Any = seq_length
        A_ : List[str] = is_training
        A_ : int = use_labels
        A_ : Tuple = vocab_size
        A_ : Dict = hidden_size
        A_ : List[Any] = num_hidden_layers
        A_ : Optional[Any] = num_attention_heads
        A_ : Optional[int] = intermediate_size
        A_ : int = hidden_dropout_prob
        A_ : Union[str, Any] = attention_probs_dropout_prob
        A_ : Union[str, Any] = max_position_embeddings
        A_ : List[str] = eos_token_id
        A_ : Optional[Any] = pad_token_id
        A_ : int = bos_token_id

    def _a (self ):
        A_ : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A_ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )

        A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        A_ : Any = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A_ : Dict = prepare_blenderbot_inputs_dict(lowercase , lowercase , lowercase )
        return config, inputs_dict

    def _a (self , lowercase , lowercase ):
        A_ : Tuple = TFBlenderbotModel(config=lowercase ).get_decoder()
        A_ : int = inputs_dict["""input_ids"""]

        A_ : List[str] = input_ids[:1, :]
        A_ : List[Any] = inputs_dict["""attention_mask"""][:1, :]
        A_ : Optional[int] = inputs_dict["""head_mask"""]
        A_ : int = 1

        # first forward pass
        A_ : Optional[Any] = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )

        A_, A_ : str = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        A_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        A_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A_ : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        A_ : Dict = model(lowercase , attention_mask=lowercase )[0]
        A_ : int = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        A_ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
        A_ : List[Any] = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ):
    '''simple docstring'''
    if attention_mask is None:
        A_ : List[str] = tf.cast(tf.math.not_equal(lowerCamelCase__ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        A_ : Optional[Any] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        A_ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        A_ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        A_ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __SCREAMING_SNAKE_CASE : Optional[Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __SCREAMING_SNAKE_CASE : List[Any] = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE : str = True
    __SCREAMING_SNAKE_CASE : Optional[int] = False
    __SCREAMING_SNAKE_CASE : Tuple = False

    def _a (self ):
        A_ : Dict = TFBlenderbotModelTester(self )
        A_ : List[Any] = ConfigTester(self , config_class=lowercase )

    def _a (self ):
        self.config_tester.run_common_tests()

    def _a (self ):
        A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase )


@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = ['My friends are cool but they eat too many carbs.']
    __SCREAMING_SNAKE_CASE : str = 'facebook/blenderbot-400M-distill'

    @cached_property
    def _a (self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def _a (self ):
        A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def _a (self ):
        A_ : Tuple = self.tokenizer(self.src_text , return_tensors="""tf""" )
        A_ : str = self.model.generate(
            model_inputs.input_ids , )
        A_ : Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
686
'''simple docstring'''
import pytest


lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''

lowerCamelCase :List[Any] = '''
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
'''


@pytest.fixture
def a ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def a ( ):
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : List[str] = dataset_loading_script_name
    A_ : int = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=lowerCamelCase__ )
    A_ : Tuple = script_dir / f'{script_name}.py'
    with open(lowerCamelCase__ , """w""" ) as f:
        f.write(lowerCamelCase__ )
    return str(lowerCamelCase__ )
686
1