code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase__ ) -> set: '''simple docstring''' a__ = set() # edges = list of graph's edges a__ = get_edges(lowercase_ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: a__ = edges.pop() chosen_vertices.add(lowercase_ ) chosen_vertices.add(lowercase_ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(lowercase_ ) return chosen_vertices def _lowerCamelCase ( UpperCAmelCase__ ) -> set: '''simple docstring''' a__ = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
232
def UpperCamelCase ( lowercase_ ) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("""List is empty""" ) lowercase__ : int = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __snake_case = 16 __snake_case = 32 def __lowerCAmelCase ( lowercase : int , lowercase : Union[str, Any] = 16 ) -> List[str]: """simple docstring""" snake_case : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" ) snake_case : Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase : List[str] ): # max_length=None => use the model max length (it's actually the default) snake_case : Optional[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase_ , max_length=lowercase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): snake_case : List[str] = datasets.map( lowercase_ , batched=lowercase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case : Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case : Dict = 16 elif accelerator.mixed_precision != "no": snake_case : str = 8 else: snake_case : Dict = None return tokenizer.pad( lowercase_ , padding="longest" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="pt" , ) # Instantiate dataloaders. snake_case : int = DataLoader( tokenized_datasets["train"] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) snake_case : Optional[int] = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __snake_case = mocked_dataloaders # noqa: F811 def __lowerCAmelCase ( lowercase : Any , lowercase : int ) -> Optional[Any]: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase_ ) == "1": snake_case : List[str] = 2 # New Code # snake_case : List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator snake_case : Optional[int] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case : Dict = config["""lr"""] snake_case : Dict = int(config["num_epochs"] ) snake_case : int = int(config["seed"] ) snake_case : int = int(config["batch_size"] ) snake_case : str = evaluate.load("glue" , "mrpc" ) set_seed(lowercase_ ) snake_case : Dict = get_dataloaders(lowercase_ , lowercase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case : int = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case : str = model.to(accelerator.device ) # Instantiate optimizer snake_case : Any = AdamW(params=model.parameters() , lr=lowercase_ ) # Instantiate scheduler snake_case : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case : str = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Now we train the model for epoch in range(lowercase_ ): model.train() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowercase_ ): snake_case : List[str] = model(**lowercase_ ) snake_case : Any = output.loss accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case : List[str] = model(**lowercase_ ) snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) snake_case : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase_ , references=lowercase_ , ) snake_case : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowercase_ ) def __lowerCAmelCase ( ) -> str: """simple docstring""" snake_case : int = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase_ , default=lowercase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=lowercase_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) snake_case : str = parser.parse_args() snake_case : List[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
178
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = do_rescale lowercase__ : List[Any] = rescale_factor lowercase__ : Tuple = do_pad lowercase__ : Optional[Any] = pad_size def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height lowercase__ : str = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_) if not valid_images(SCREAMING_SNAKE_CASE_): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") # All transformations expect numpy arrays. lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images] if do_pad: lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Dict = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
12
0
'''simple docstring''' import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class SCREAMING_SNAKE_CASE( UpperCAmelCase_ ): """simple docstring""" def A ( self : Union[str, Any] , __snake_case : Dict ) -> int: with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as input_file: UpperCAmelCase : Any = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) UpperCAmelCase : Any = input_file.read() UpperCAmelCase : Any = regexp.search(SCREAMING_SNAKE_CASE_ ) return match def A ( self : Dict , __snake_case : Dict ) -> List[str]: with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as input_file: UpperCAmelCase : Any = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) UpperCAmelCase : Union[str, Any] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase : List[Any] = regexp.finditer(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase : List[Any] = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : Dict ) -> Tuple: UpperCAmelCase : Any = Path('''./datasets''' ) UpperCAmelCase : List[Any] = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(SCREAMING_SNAKE_CASE_ ) ): raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : List[str] ) -> Dict: UpperCAmelCase : Any = Path('''./datasets''' ) UpperCAmelCase : int = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(SCREAMING_SNAKE_CASE_ ) ): raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
127
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu lowerCamelCase__ : Optional[int] = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[Any] = True while ask_again: lowercase__ : Tuple = input(lowercase_ ) try: if default is not None and len(lowercase_ ) == 0: return default return convert_value(lowercase_ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ ) lowercase__ : Any = menu.run(default_choice=lowercase_ ) return convert_value(lowercase_ ) if convert_value is not None else result def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = int(lowercase_ ) return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[str] = int(lowercase_ ) return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] ) def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : str = int(lowercase_ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _snake_case ( argparse.RawDescriptionHelpFormatter ): def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""") return usage
12
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ = { """facebook/nllb-large-en-ro""": 1024, """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off lowerCamelCase__ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ :str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ :Tuple = ['input_ids', 'attention_mask'] SCREAMING_SNAKE_CASE__ :List[Any] = NllbTokenizer SCREAMING_SNAKE_CASE__ :List[int] = [] SCREAMING_SNAKE_CASE__ :List[int] = [] def __init__( self : Any , __a : List[str]=None , __a : Optional[Any]=None , __a : int="<s>" , __a : Union[str, Any]="</s>" , __a : Optional[int]="</s>" , __a : Dict="<s>" , __a : Dict="<unk>" , __a : Tuple="<pad>" , __a : Any="<mask>" , __a : str=None , __a : Union[str, Any]=None , __a : List[Any]=None , __a : Optional[int]=False , **__a : int , ) -> List[str]: _UpperCamelCase : Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token _UpperCamelCase : Optional[int] = legacy_behaviour super().__init__( vocab_file=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , legacy_behaviour=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) _UpperCamelCase : int = vocab_file _UpperCamelCase : Any = False if not self.vocab_file else True _UpperCamelCase : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) _UpperCamelCase : Optional[Any] = { lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _UpperCamelCase : int = src_lang if src_lang is not None else """eng_Latn""" _UpperCamelCase : Tuple = self.convert_tokens_to_ids(self._src_lang ) _UpperCamelCase : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __SCREAMING_SNAKE_CASE ( self : int ) -> str: return self._src_lang @src_lang.setter def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Tuple ) -> List[str]: _UpperCamelCase : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] , __a : Any = None ) -> List[Any]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Dict , __a : Any = None ) -> Dict: _UpperCamelCase : Optional[int] = [self.sep_token_id] _UpperCamelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[Any] , __a : List[str] , __a : Optional[int] , __a : int , **__a : Optional[int] ) -> Any: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) _UpperCamelCase : Tuple = src_lang _UpperCamelCase : Any = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) _UpperCamelCase : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) _UpperCamelCase : Optional[int] = tgt_lang_id return inputs def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[Any] , __a : Optional[Any] = "eng_Latn" , __a : Optional[Any] = None , __a : str = "fra_Latn" , **__a : str , ) -> Any: _UpperCamelCase : Optional[int] = src_lang _UpperCamelCase : List[str] = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __SCREAMING_SNAKE_CASE ( self : int , __a : Any ) -> Union[str, Any]: _UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) if self.legacy_behaviour: _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : Optional[Any] = [self.cur_lang_code] _UpperCamelCase : Tuple = [self.eos_token_id] _UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCamelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] ) -> Optional[Any]: _UpperCamelCase : int = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) if self.legacy_behaviour: _UpperCamelCase : Dict = [] _UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : Tuple = [self.cur_lang_code] _UpperCamelCase : str = [self.eos_token_id] _UpperCamelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCamelCase : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[Any] , __a : List[Any] = None ) -> Optional[int]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return _UpperCamelCase : List[Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
624
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ : Tuple = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[int] = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
0
'''simple docstring''' def _lowerCAmelCase (_lowercase ): """simple docstring""" if not all(char in "01" for char in bin_string ): raise ValueError("Non-binary value was passed to the function" ) if not bin_string: raise ValueError("Empty string was passed to the function" ) a__ = """""" while len(lowercase_ ) % 3 != 0: a__ = """0""" + bin_string a__ = [ bin_string[index : index + 3] for index in range(len(lowercase_ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: a__ = 0 for index, val in enumerate(lowercase_ ): oct_val += int(2 ** (2 - index) * int(lowercase_ ) ) oct_string += str(lowercase_ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
331
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _snake_case ( UpperCAmelCase_ ): def __init__( self): '''simple docstring''' lowercase__ : List[Any] = [] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_init_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_evaluate""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_predict""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_save""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_log""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_prediction_step""") @require_torch class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = tempfile.mkdtemp() def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.output_dir) def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_) lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_) lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_) return Trainer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_)) # Order doesn't matter lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__) elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_) else: self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = ["""on_init_end""", """on_train_begin"""] lowercase__ : Union[str, Any] = 0 lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader()) lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs): expected_events.append("""on_epoch_begin""") for _ in range(SCREAMING_SNAKE_CASE_): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""") expected_events.append("""on_epoch_end""") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.get_trainer() lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # Callbacks passed at init are added to the default callbacks lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : Tuple = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # We can also add, pop, or remove by instance lowercase__ : Union[str, Any] = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : str = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # Independent log/save/eval lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""") trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""") trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""") as warn_mock: lowercase__ : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
12
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ : List[str] = { """configuration_roberta_prelayernorm""": [ """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaPreLayerNormConfig""", """RobertaPreLayerNormOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaPreLayerNormForCausalLM""", """RobertaPreLayerNormForMaskedLM""", """RobertaPreLayerNormForMultipleChoice""", """RobertaPreLayerNormForQuestionAnswering""", """RobertaPreLayerNormForSequenceClassification""", """RobertaPreLayerNormForTokenClassification""", """RobertaPreLayerNormModel""", """RobertaPreLayerNormPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaPreLayerNormForCausalLM""", """TFRobertaPreLayerNormForMaskedLM""", """TFRobertaPreLayerNormForMultipleChoice""", """TFRobertaPreLayerNormForQuestionAnswering""", """TFRobertaPreLayerNormForSequenceClassification""", """TFRobertaPreLayerNormForTokenClassification""", """TFRobertaPreLayerNormMainLayer""", """TFRobertaPreLayerNormModel""", """TFRobertaPreLayerNormPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ """FlaxRobertaPreLayerNormForCausalLM""", """FlaxRobertaPreLayerNormForMaskedLM""", """FlaxRobertaPreLayerNormForMultipleChoice""", """FlaxRobertaPreLayerNormForQuestionAnswering""", """FlaxRobertaPreLayerNormForSequenceClassification""", """FlaxRobertaPreLayerNormForTokenClassification""", """FlaxRobertaPreLayerNormModel""", """FlaxRobertaPreLayerNormPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
439
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = RoCBertTokenizer __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : str = False __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = filter_non_english def lowercase__ ( self): '''simple docstring''' super().setUp() lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] lowercase__ : Dict = {} lowercase__ : Tuple = {} for i, value in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = i lowercase__ : Any = i lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""]) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""]) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowercase__ : Optional[int] = {} for i, token in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = i lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""]) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_whitespace(""" """)) self.assertTrue(_is_whitespace("""\t""")) self.assertTrue(_is_whitespace("""\r""")) self.assertTrue(_is_whitespace("""\n""")) self.assertTrue(_is_whitespace("""\u00A0""")) self.assertFalse(_is_whitespace("""A""")) self.assertFalse(_is_whitespace("""-""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_control("""\u0005""")) self.assertFalse(_is_control("""A""")) self.assertFalse(_is_control(""" """)) self.assertFalse(_is_control("""\t""")) self.assertFalse(_is_control("""\r""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_punctuation("""-""")) self.assertTrue(_is_punctuation("""$""")) self.assertTrue(_is_punctuation("""`""")) self.assertTrue(_is_punctuation(""".""")) self.assertFalse(_is_punctuation("""A""")) self.assertFalse(_is_punctuation(""" """)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) if self.test_rust_tokenizer: lowercase__ : int = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' lowercase__ : List[str] = tokenizer_r.encode_plus( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False lowercase__ : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""])) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = ["""的""", """人""", """有"""] lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : Union[str, Any] = True lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = False lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that only the first Chinese character is not preceded by "##". lowercase__ : Any = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_) ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): lowercase__ : Optional[int] = """你好,你是谁""" lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.prepare_for_model( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
12
0
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) __UpperCamelCase : Dict = logging.getLogger(__name__) class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ) ->Union[str, Any]: '''simple docstring''' __a = self.layer[current_layer](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , head_mask[current_layer] ) __a = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCAmelCase_ , ) class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): def __init__( self , lowerCamelCase ) ->Dict: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) __a = BertEncoderWithPabee(SCREAMING_SNAKE_CASE_ ) self.init_weights() __a = 0 __a = 0 __a = 0 __a = 0 def __UpperCamelCase ( self , lowerCamelCase ) ->Optional[Any]: '''simple docstring''' __a = threshold def __UpperCamelCase ( self , lowerCamelCase ) ->Optional[Any]: '''simple docstring''' __a = patience def __UpperCamelCase ( self ) ->Dict: '''simple docstring''' __a = 0 __a = 0 def __UpperCamelCase ( self ) ->str: '''simple docstring''' __a = self.inference_layers_num / self.inference_instances_num __a = ( F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(SCREAMING_SNAKE_CASE_ ) @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) def __UpperCamelCase ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , ) ->Dict: '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: __a = input_ids.size() elif inputs_embeds is not None: __a = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) __a = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __a = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) if token_type_ids is None: __a = torch.zeros(SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. __a = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __a = encoder_hidden_states.size() __a = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __a = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) __a = self.invert_attention_mask(SCREAMING_SNAKE_CASE_ ) else: __a = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __a = self.get_head_mask(SCREAMING_SNAKE_CASE_ , self.config.num_hidden_layers ) __a = self.embeddings( input_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ ) __a = embedding_output if self.training: __a = [] for i in range(self.config.num_hidden_layers ): __a = self.encoder.adaptive_forward( SCREAMING_SNAKE_CASE_ , current_layer=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ ) __a = self.pooler(SCREAMING_SNAKE_CASE_ ) __a = output_layers[i](output_dropout(SCREAMING_SNAKE_CASE_ ) ) res.append(SCREAMING_SNAKE_CASE_ ) elif self.patience == 0: # Use all layers for inference __a = self.encoder( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) __a = self.pooler(encoder_outputs[0] ) __a = [output_layers[self.config.num_hidden_layers - 1](SCREAMING_SNAKE_CASE_ )] else: __a = 0 __a = None __a = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __a = self.encoder.adaptive_forward( SCREAMING_SNAKE_CASE_ , current_layer=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ ) __a = self.pooler(SCREAMING_SNAKE_CASE_ ) __a = output_layers[i](SCREAMING_SNAKE_CASE_ ) if regression: __a = logits.detach() if patient_result is not None: __a = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __a = 0 else: __a = logits.detach().argmax(dim=1 ) if patient_result is not None: __a = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(SCREAMING_SNAKE_CASE_ ) ): patient_counter += 1 else: __a = 0 __a = logits if patient_counter == self.patience: break __a = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCAmelCase_ , ) class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): def __init__( self , lowerCamelCase ) ->int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) __a = config.num_labels __a = BertModelWithPabee(SCREAMING_SNAKE_CASE_ ) __a = nn.Dropout(config.hidden_dropout_prob ) __a = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) def __UpperCamelCase ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) ->Tuple: '''simple docstring''' __a = self.bert( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __a = (logits[-1],) if labels is not None: __a = None __a = 0 for ix, logits_item in enumerate(SCREAMING_SNAKE_CASE_ ): if self.num_labels == 1: # We are doing regression __a = MSELoss() __a = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __a = CrossEntropyLoss() __a = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __a = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __a = (total_loss / total_weights,) + outputs return outputs
448
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): def __init__( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , """vision""") self.check_model_type(SCREAMING_SNAKE_CASE_) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if "text_queries" in kwargs: lowercase__ : Any = kwargs.pop("""text_queries""") if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)): lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels} else: lowercase__ : int = image lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) return results def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs["""threshold"""] if "top_k" in kwargs: lowercase__ : int = kwargs["""top_k"""] return {}, {}, postprocess_params def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = load_image(inputs["""image"""]) lowercase__ : Any = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = candidate_labels.split(""",""") lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = model_inputs.pop("""target_size""") lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""") lowercase__ : Dict = model_inputs.pop("""is_last""") lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Optional[int] = model_output["""candidate_label"""] lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0] for index in outputs["scores"].nonzero(): lowercase__ : Optional[Any] = outputs["""scores"""][index].item() lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0]) lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_) if top_k: lowercase__ : Any = results[:top_k] return results def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""") lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
12
0
def lowerCamelCase__ ( _a , _a): return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
25
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase__ : str = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) else: lowercase__ : List[str] = max( mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) , mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] , ) lowercase__ : List[Any] = val return f[i][j] def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowercase__ : Any = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase__ : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase__ : Tuple = dp[i - 1][w_] return dp[n][w_], dp def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' if not (isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) lowercase__ : str = len(lowercase_ ) if num_items != len(lowercase_ ): lowercase__ : Optional[int] = ( """The number of weights must be the same as the number of values.\n""" F'But got {num_items} weights and {len(lowercase_ )} values' ) raise ValueError(lowercase_ ) for i in range(lowercase_ ): if not isinstance(wt[i] , lowercase_ ): lowercase__ : int = ( """All weights must be integers but got weight of """ F'type {type(wt[i] )} at index {i}' ) raise TypeError(lowercase_ ) lowercase__ , lowercase__ : Tuple = knapsack(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : set = set() _construct_solution(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) return optimal_val, example_optional_set def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(lowercase_ , lowercase_ , i - 1 , lowercase_ , lowercase_ ) else: optimal_set.add(lowercase_ ) _construct_solution(lowercase_ , lowercase_ , i - 1 , j - wt[i - 1] , lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : Dict = [3, 2, 4, 4] lowerCamelCase__ : List[Any] = [4, 3, 2, 3] lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Dict = 6 lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
12
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu _snake_case : Optional[int] = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def __snake_case ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None ): '''simple docstring''' lowercase = True while ask_again: lowercase = input(lowercase_ ) try: if default is not None and len(lowercase_ ) == 0: return default return convert_value(lowercase_ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase_ ) def __snake_case ( __magic_name__ , __magic_name__=[] , __magic_name__=None , __magic_name__=0 ): '''simple docstring''' lowercase = BulletMenu(lowercase_ , lowercase_ ) lowercase = menu.run(default_choice=lowercase_ ) return convert_value(lowercase_ ) if convert_value is not None else result def __snake_case ( __magic_name__ ): '''simple docstring''' lowercase = int(lowercase_ ) return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] ) def __snake_case ( __magic_name__ ): '''simple docstring''' lowercase = int(lowercase_ ) return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] ) def __snake_case ( __magic_name__ ): '''simple docstring''' lowercase = int(lowercase_ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def __snake_case ( __magic_name__ ): '''simple docstring''' lowercase = int(lowercase_ ) return PrecisionType(["no", "fp16", "bf16", "fp8"][value] ) def __snake_case ( __magic_name__ ): '''simple docstring''' lowercase = int(lowercase_ ) return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] ) def __snake_case ( __magic_name__ ): '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class UpperCamelCase_ ( argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def SCREAMING_SNAKE_CASE( self :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple ) ->List[str]: lowercase = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase = usage.replace("<command> [<args>] " , "" ) return usage
441
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
12
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ : List[str] = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : int = [ """FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FocalNetForImageClassification""", """FocalNetForMaskedImageModeling""", """FocalNetBackbone""", """FocalNetModel""", """FocalNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __magic_name__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
281
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
12
0
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase_ ) class lowercase_ ( UpperCAmelCase_ ): '''simple docstring''' __lowerCAmelCase : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} ) __lowerCAmelCase : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} ) __lowerCAmelCase : ClassVar[Features] = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) __lowerCAmelCase : str = "question" __lowerCAmelCase : str = "context" __lowerCAmelCase : str = "answers" @property def snake_case_ ( self ) -> Any: """simple docstring""" return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
447
lowerCamelCase__ : dict[tuple[int, int, int], int] = {} def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on lowercase__ : Tuple = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 ) lowercase__ : List[str] = state_late + state_absent + state_ontime lowercase__ : List[Any] = prizestrings return prizestrings def UpperCamelCase ( lowercase_ = 30 ) -> int: '''simple docstring''' return _calculate(lowercase_ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
12
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter __magic_name__ = """Create a default config file for Accelerate with only a few flags set.""" def _lowerCamelCase ( UpperCAmelCase__="no",UpperCAmelCase__ = default_json_config_file,UpperCAmelCase__ = False ) -> Any: '''simple docstring''' a__ = Path(lowercase_ ) path.parent.mkdir(parents=lowercase_,exist_ok=lowercase_ ) if path.exists(): print( f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False a__ = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) a__ = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): a__ = torch.cuda.device_count() a__ = num_gpus a__ = False if num_gpus > 1: a__ = """MULTI_GPU""" else: a__ = """NO""" elif is_xpu_available() and use_xpu: a__ = torch.xpu.device_count() a__ = num_xpus a__ = False if num_xpus > 1: a__ = """MULTI_XPU""" else: a__ = """NO""" elif is_npu_available(): a__ = torch.npu.device_count() a__ = num_npus a__ = False if num_npus > 1: a__ = """MULTI_NPU""" else: a__ = """NO""" else: a__ = 0 a__ = True a__ = 1 a__ = """NO""" a__ = ClusterConfig(**lowercase_ ) config.to_json_file(lowercase_ ) return path def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]: '''simple docstring''' a__ = parser.add_parser('default',parents=lowercase_,help=lowercase_,formatter_class=lowercase_ ) parser.add_argument( '--config_file',default=lowercase_,help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ),dest='save_location',) parser.add_argument( '--mixed_precision',choices=['no', 'fp16', 'bf16'],type=lowercase_,help='Whether or not to use mixed precision training. ' 'Choose between FP16 and BF16 (bfloat16) training. ' 'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.',default='no',) parser.set_defaults(func=lowercase_ ) return parser def _lowerCamelCase ( UpperCAmelCase__ ) -> Any: '''simple docstring''' a__ = write_basic_config(args.mixed_precision,args.save_location ) if config_file: print(f'''accelerate configuration saved at {config_file}''' )
232
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' raise RuntimeError("""CUDA out of memory.""" ) class _snake_case ( nn.Module ): def __init__( self): '''simple docstring''' super().__init__() lowercase__ : Optional[Any] = nn.Linear(3 , 4) lowercase__ : Union[str, Any] = nn.BatchNormad(4) lowercase__ : str = nn.Linear(4 , 5) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_))) class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowercase__ , lowercase__ : int = mock_training_loop_function("""hello""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) self.assertListEqual([bs, arga] , [8, """hello"""]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function(1_28 , """hello""" , """world""") self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0]) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): raise ValueError("""Oops, we had an error!""") with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0]) @require_cuda def lowercase__ ( self): '''simple docstring''' lowercase__ : str = torch.cuda.memory_allocated() lowercase__ : str = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = release_memory(SCREAMING_SNAKE_CASE_) self.assertEqual(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_)
12
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available __snake_case = {"""tokenization_herbert""": ["""HerbertTokenizer"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""HerbertTokenizerFast"""] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
178
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ : Optional[int] = 4 lowercase__ : Optional[Any] = 48 lowercase__ : int = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : List[str] = [6, 6, 6, 6] lowercase__ : Any = 60 lowercase__ : Tuple = [6, 6, 6, 6] lowercase__ : Dict = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = 4 lowercase__ : Any = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ : str = 1 lowercase__ : Optional[int] = 1 lowercase__ : Optional[int] = 1_26 lowercase__ : Any = 7 lowercase__ : int = 255.0 lowercase__ : List[Any] = """""" return config def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowercase__ : Union[str, Any] = """layernorm.weight""" if name == "norm.bias": lowercase__ : List[str] = """layernorm.bias""" if "conv_first" in name: lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ : Optional[int] = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowercase__ : List[str] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowercase__ : str = """swin2sr.""" + name return name def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(lowercase_ ) if "qkv" in key: lowercase__ : Any = key.split(""".""" ) lowercase__ : List[Any] = int(key_split[1] ) lowercase__ : Dict = int(key_split[4] ) lowercase__ : Optional[Any] = config.embed_dim if "weight" in key: lowercase__ : List[str] = val[:dim, :] lowercase__ : List[str] = val[dim : dim * 2, :] lowercase__ : Optional[Any] = val[-dim:, :] else: lowercase__ : Optional[Any] = val[:dim] lowercase__ : List[Any] = val[dim : dim * 2] lowercase__ : Optional[int] = val[-dim:] pass else: lowercase__ : Optional[Any] = val return orig_state_dict def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Dict = get_config(lowercase_ ) lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ ) lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) lowercase__ : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56 lowercase__ : Union[str, Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ : Union[str, Any] = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ : Optional[Any] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : int = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 ) print("""Looks ok!""" ) lowercase__ : str = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowercase__ : str = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") lowerCamelCase__ : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
12
0
'''simple docstring''' import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase : Dict = FileLock(str(tmpdir / '''foo.lock''' ) ) UpperCAmelCase : Tuple = FileLock(str(tmpdir / '''foo.lock''' ) ) UpperCAmelCase : Optional[int] = 0.0_1 with locka.acquire(): with pytest.raises(lowercase_ ): UpperCAmelCase : str = time.time() locka.acquire(lowercase_ ) assert time.time() - _start > timeout def snake_case_ ( _lowerCAmelCase : Tuple ) -> List[Any]: UpperCAmelCase : Optional[int] = """a""" * 1000 + """.lock""" UpperCAmelCase : Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(lowercase_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 UpperCAmelCase : str = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase_ ): locka.acquire(0 )
127
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : BigBirdConfig __lowerCAmelCase : jnp.dtype = jnp.floataa __lowerCAmelCase : bool = True def lowercase__ ( self): '''simple docstring''' super().setup() lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ): lowercase__ : int = logits.shape[-1] lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" ) lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 ) lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase__ : Optional[int] = reduction(lowercase_ ) return loss lowercase__ : int = partial(lowercase_ , reduction=jnp.mean ) lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _snake_case : __lowerCAmelCase : str = "google/bigbird-roberta-base" __lowerCAmelCase : int = 3_000 __lowerCAmelCase : int = 10_500 __lowerCAmelCase : int = 128 __lowerCAmelCase : int = 3 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = 5 # tx_args __lowerCAmelCase : float = 3e-5 __lowerCAmelCase : float = 0.0 __lowerCAmelCase : int = 20_000 __lowerCAmelCase : float = 0.0_095 __lowerCAmelCase : str = "bigbird-roberta-natural-questions" __lowerCAmelCase : str = "training-expt" __lowerCAmelCase : str = "data/nq-training.jsonl" __lowerCAmelCase : str = "data/nq-validation.jsonl" def lowercase__ ( self): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_) lowercase__ : Any = os.path.join(self.base_dir , self.save_dir) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class _snake_case : __lowerCAmelCase : int __lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs def __call__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""]) lowercase__ : str = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa), } return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))] while len(SCREAMING_SNAKE_CASE_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]: '''simple docstring''' if seed is not None: lowercase__ : Any = dataset.shuffle(seed=lowercase_ ) for i in range(len(lowercase_ ) // batch_size ): lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowercase_ ) @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int: '''simple docstring''' def loss_fn(lowercase_ ): lowercase__ : Dict = model_inputs.pop("""start_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""end_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Any = outputs return state.loss_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ ) lowercase__ : Tuple = jax.value_and_grad(lowercase_ ) lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params ) lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" ) lowercase__ : str = state.apply_gradients(grads=lowercase_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str: '''simple docstring''' lowercase__ : Tuple = model_inputs.pop("""start_labels""" ) lowercase__ : List[str] = model_inputs.pop("""end_labels""" ) lowercase__ : int = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class _snake_case ( train_state.TrainState ): __lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ ) @dataclass class _snake_case : __lowerCAmelCase : Args __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : wandb __lowerCAmelCase : Callable = None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : List[str] = model.params lowercase__ : Dict = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[Any] = args lowercase__ : Union[str, Any] = data_collator lowercase__ : str = lr lowercase__ : Union[str, Any] = params lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_) return state def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = self.args lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size lowercase__ : int = jax.random.PRNGKey(0) lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) for epoch in range(args.max_epochs): lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa) lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 if i % args.logging_steps == 0: lowercase__ : List[str] = jax_utils.unreplicate(state.step) lowercase__ : str = running_loss.item() / i lowercase__ : Tuple = self.scheduler_fn(state_step - 1) lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_)) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size) lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa) lowercase__ : Optional[Any] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 return running_loss / i def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """) self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""")) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""")) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_) print("""DONE""") def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ ) with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase__ : Optional[Any] = from_bytes(state.params , f.read() ) with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase__ : Dict = from_bytes(state.opt_state , f.read() ) lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) ) lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) ) with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f: lowercase__ : int = json.load(lowercase_ ) lowercase__ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Optional[int] = num_train_steps - warmup_steps lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ ) lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ ) lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' def weight_decay_mask(lowercase_ ): lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ ) lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowercase_ ) lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ ) return tx, lr
12
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = MBartConfig SCREAMING_SNAKE_CASE__ :Tuple = {} SCREAMING_SNAKE_CASE__ :Any = 'gelu' def __init__( self : int , __a : Optional[int] , __a : List[Any]=13 , __a : Tuple=7 , __a : Dict=True , __a : Dict=False , __a : Optional[Any]=99 , __a : str=32 , __a : str=2 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Any=0.1 , __a : Tuple=0.1 , __a : str=20 , __a : Union[str, Any]=2 , __a : List[str]=1 , __a : Optional[Any]=0 , ) -> Any: _UpperCamelCase : str = parent _UpperCamelCase : Tuple = batch_size _UpperCamelCase : List[str] = seq_length _UpperCamelCase : List[Any] = is_training _UpperCamelCase : str = use_labels _UpperCamelCase : Any = vocab_size _UpperCamelCase : str = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : Optional[int] = num_attention_heads _UpperCamelCase : List[str] = intermediate_size _UpperCamelCase : int = hidden_dropout_prob _UpperCamelCase : Dict = attention_probs_dropout_prob _UpperCamelCase : List[str] = max_position_embeddings _UpperCamelCase : Any = eos_token_id _UpperCamelCase : Dict = pad_token_id _UpperCamelCase : Dict = bos_token_id def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCamelCase : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCamelCase : Dict = tf.concat([input_ids, eos_tensor] , axis=1 ) _UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCamelCase : Tuple = prepare_mbart_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return config, inputs_dict def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Any , __a : List[str] ) -> str: _UpperCamelCase : Optional[Any] = TFMBartModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder() _UpperCamelCase : Optional[Any] = inputs_dict["""input_ids"""] _UpperCamelCase : List[str] = input_ids[:1, :] _UpperCamelCase : Optional[Any] = inputs_dict["""attention_mask"""][:1, :] _UpperCamelCase : List[str] = inputs_dict["""head_mask"""] _UpperCamelCase : Any = 1 # first forward pass _UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ ) _UpperCamelCase : int = outputs.to_tuple() _UpperCamelCase : str = past_key_values[1] def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> str: """simple docstring""" if attention_mask is None: _UpperCamelCase : Optional[int] = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: _UpperCamelCase : Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: _UpperCamelCase : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCamelCase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () SCREAMING_SNAKE_CASE__ :List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE__ :Optional[Any] = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ :Optional[int] = True SCREAMING_SNAKE_CASE__ :str = False SCREAMING_SNAKE_CASE__ :Union[str, Any] = False def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict , __a : List[Any] ) -> int: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: _UpperCamelCase : Union[str, Any] = TFMBartModelTester(self ) _UpperCamelCase : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) @require_sentencepiece @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[Any] = [ ' UN Chief Says There Is No Military Solution in Syria', ] SCREAMING_SNAKE_CASE__ :Any = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] SCREAMING_SNAKE_CASE__ :int = 'facebook/mbart-large-en-ro' @cached_property def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: _UpperCamelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __SCREAMING_SNAKE_CASE ( self : Optional[int] , **__a : Any ) -> Union[str, Any]: _UpperCamelCase : Dict = self.translate_src_text(**SCREAMING_SNAKE_CASE_ ) self.assertListEqual(self.expected_text , SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , **__a : Optional[int] ) -> Union[str, Any]: _UpperCamelCase : List[Any] = self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE_ , return_tensors="tf" ) _UpperCamelCase : Any = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) _UpperCamelCase : Union[str, Any] = self.tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) return generated_words @slow def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: self._assert_generated_batch_equal_expected()
624
lowerCamelCase__ : List[str] = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCamelCase__ : int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
12
0
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def _lowerCAmelCase (_lowercase , _lowercase , _lowercase , _lowercase ): """simple docstring""" a__ = s.rsplit(lowercase_ , lowercase_ ) return new.join(lowercase_ ) def _lowerCAmelCase (_lowercase ): """simple docstring""" return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def _lowerCAmelCase (_lowercase ): """simple docstring""" a__ = {} a__ = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: a__ = key.replace(F'{group_key}.' , F'{group_key}.group.' ) if "res_path" in key: a__ = key.replace("res_path." , "res_path.path." ) if key.endswith(".w" ): a__ = rreplace(lowercase_ , ".w" , ".weight" , 1 ) if key.endswith(".b" ): a__ = rreplace(lowercase_ , ".b" , ".bias" , 1 ) a__ = value.float() return upgrade @torch.no_grad() def _lowerCAmelCase (_lowercase , _lowercase , _lowercase=None , _lowercase=True ): """simple docstring""" from dall_e import Encoder a__ = Encoder() if os.path.exists(lowercase_ ): a__ = torch.load(lowercase_ ) else: a__ = torch.hub.load_state_dict_from_url(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): a__ = ckpt.state_dict() encoder.load_state_dict(lowercase_ ) if config_path is not None: a__ = FlavaImageCodebookConfig.from_pretrained(lowercase_ ) else: a__ = FlavaImageCodebookConfig() a__ = FlavaImageCodebook(lowercase_ ).eval() a__ = encoder.state_dict() a__ = upgrade_state_dict(lowercase_ ) hf_model.load_state_dict(lowercase_ ) a__ = hf_model.state_dict() a__ = count_parameters(lowercase_ ) a__ = count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(lowercase_ ) else: return hf_state_dict if __name__ == "__main__": UpperCamelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") UpperCamelCase_ : List[str] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
331
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[int] = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : Any = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[int] = vocab_size lowercase__ : Optional[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = None lowercase__ : str = vocab_size - 1 lowercase__ : Any = vocab_size - 1 lowercase__ : Dict = vocab_size - 1 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Any = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = 20 lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""") lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : str = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : Any = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') @require_flax class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = FlaxGPTJModelTester(self) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @tooslow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""") lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : Optional[Any] = False lowercase__ : List[str] = model.config.eos_token_id lowercase__ : List[Any] = jax.jit(model.generate) lowercase__ : Tuple = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : str = 0 lowercase__ : List[Any] = 1 lowercase__ : Dict = 0 lowercase__ : Any = 1 lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = fx_state with torch.no_grad(): lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_) lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params) lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = 0 lowercase__ : int = 1 lowercase__ : str = 0 lowercase__ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_) with torch.no_grad(): lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : int = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
12
0
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration a_ : int = { """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""", """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""", """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""", """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""", """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""", """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""", """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""", """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""", """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""", """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""", } def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(lowercase_ , lowercase_ ) a_ : Optional[Any] = { """blocks""": """layers""", """mlp.0""": """fc1""", """mlp.2""": """fc2""", """mlp_ln""": """final_layer_norm""", """.attn.query""": """.self_attn.q_proj""", """.attn.key""": """.self_attn.k_proj""", """.attn.value""": """.self_attn.v_proj""", """.attn_ln""": """.self_attn_layer_norm""", """.attn.out""": """.self_attn.out_proj""", """.cross_attn.query""": """.encoder_attn.q_proj""", """.cross_attn.key""": """.encoder_attn.k_proj""", """.cross_attn.value""": """.encoder_attn.v_proj""", """.cross_attn_ln""": """.encoder_attn_layer_norm""", """.cross_attn.out""": """.encoder_attn.out_proj""", """decoder.ln.""": """decoder.layer_norm.""", """encoder.ln.""": """encoder.layer_norm.""", """token_embedding""": """embed_tokens""", """encoder.positional_embedding""": """encoder.embed_positions.weight""", """decoder.positional_embedding""": """decoder.embed_positions.weight""", """ln_post""": """layer_norm""", } def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = list(s_dict.keys() ) for key in keys: SCREAMING_SNAKE_CASE = key for k, v in WHISPER_MAPPING.items(): if k in key: SCREAMING_SNAKE_CASE = new_key.replace(lowercase_ , lowercase_ ) print(f"""{key} -> {new_key}""" ) SCREAMING_SNAKE_CASE = s_dict.pop(lowercase_ ) return s_dict def __lowerCAmelCase ( _UpperCamelCase : Any ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Any ) -> bytes: '''simple docstring''' os.makedirs(lowercase_ , exist_ok=lowercase_ ) SCREAMING_SNAKE_CASE = os.path.basename(lowercase_ ) SCREAMING_SNAKE_CASE = url.split('/' )[-2] SCREAMING_SNAKE_CASE = os.path.join(lowercase_ , lowercase_ ) if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ): raise RuntimeError(f"""{download_target} exists and is not a regular file""" ) if os.path.isfile(lowercase_ ): SCREAMING_SNAKE_CASE = open(lowercase_ , 'rb' ).read() if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" ) with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=lowercase_ , unit_divisor=10_24 ) as loop: while True: SCREAMING_SNAKE_CASE = source.read(81_92 ) if not buffer: break output.write(lowercase_ ) loop.update(len(lowercase_ ) ) SCREAMING_SNAKE_CASE = open(lowercase_ , 'rb' ).read() if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' if ".pt" not in checkpoint_path: SCREAMING_SNAKE_CASE = _download(_MODELS[checkpoint_path] ) else: SCREAMING_SNAKE_CASE = torch.load(lowercase_ , map_location='cpu' ) SCREAMING_SNAKE_CASE = original_checkpoint["""dims"""] SCREAMING_SNAKE_CASE = original_checkpoint["""model_state_dict"""] SCREAMING_SNAKE_CASE = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(lowercase_ ) rename_keys(lowercase_ ) SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] SCREAMING_SNAKE_CASE = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) SCREAMING_SNAKE_CASE = WhisperForConditionalGeneration(lowercase_ ) SCREAMING_SNAKE_CASE = model.model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0 and not set(lowercase_ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' f""" but all the following weights are missing {missing}""" ) if tie_embeds: SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.decoder.embed_tokens ) else: SCREAMING_SNAKE_CASE = proj_out_weights model.save_pretrained(lowercase_ ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") a_ : Any = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
439
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['image_processor', 'tokenizer'] __lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor' __lowerCAmelCase : int = 'AutoTokenizer' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: lowercase__ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) if images is not None: lowercase__ : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) if text is not None and images is not None: lowercase__ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_) , tensor_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) @property def lowercase__ ( self): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
12
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __UpperCamelCase : str = logging.get_logger(__name__) __UpperCamelCase : Dict = { """openai/imagegpt-small""": """""", """openai/imagegpt-medium""": """""", """openai/imagegpt-large""": """""", } class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): __a ='imagegpt' __a =['past_key_values'] __a ={ 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , lowerCamelCase=512 + 1 , lowerCamelCase=32 * 32 , lowerCamelCase=512 , lowerCamelCase=24 , lowerCamelCase=8 , lowerCamelCase=None , lowerCamelCase="quick_gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=1e-5 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ) ->Union[str, Any]: '''simple docstring''' __a = vocab_size __a = n_positions __a = n_embd __a = n_layer __a = n_head __a = n_inner __a = activation_function __a = resid_pdrop __a = embd_pdrop __a = attn_pdrop __a = layer_norm_epsilon __a = initializer_range __a = scale_attn_weights __a = use_cache __a = scale_attn_by_inverse_layer_idx __a = reorder_and_upcast_attn __a = tie_word_embeddings super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ): @property def __UpperCamelCase ( self ) ->int: '''simple docstring''' return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ] ) def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = 3 , lowerCamelCase = 32 , lowerCamelCase = 32 , ) ->Dict: '''simple docstring''' __a = self._generate_dummy_images(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __a = dict(preprocessor(images=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) ) return inputs
448
def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' if n == 1 or not isinstance(lowercase_ , lowercase_ ): return 0 elif n == 2: return 1 else: lowercase__ : List[Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' lowercase__ : Optional[Any] = 0 lowercase__ : Dict = 2 while digits < n: index += 1 lowercase__ : str = len(str(fibonacci(lowercase_ ) ) ) return index def UpperCamelCase ( lowercase_ = 10_00 ) -> int: '''simple docstring''' return fibonacci_digits_index(lowercase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
12
0
from __future__ import annotations from typing import TypedDict class _UpperCamelCase ( UpperCAmelCase_ ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 def lowerCamelCase__ ( _a): if not isinstance(lowercase_ , lowercase_): raise TypeError("The parameter s type must be str.") return [s[i:] + s[:i] for i in range(len(lowercase_))] def lowerCamelCase__ ( _a): if not isinstance(lowercase_ , lowercase_): raise TypeError("The parameter s type must be str.") if not s: raise ValueError("The parameter s must not be empty.") SCREAMING_SNAKE_CASE : List[str] = all_rotations(lowercase_) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation SCREAMING_SNAKE_CASE : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations]), "idx_original_string": rotations.index(lowercase_), } return response def lowerCamelCase__ ( _a , _a): if not isinstance(lowercase_ , lowercase_): raise TypeError("The parameter bwt_string type must be str.") if not bwt_string: raise ValueError("The parameter bwt_string must not be empty.") try: SCREAMING_SNAKE_CASE : Optional[Any] = int(lowercase_) except ValueError: raise TypeError( "The parameter idx_original_string type must be int or passive" " of cast to int.") if idx_original_string < 0: raise ValueError("The parameter idx_original_string must not be lower than 0.") if idx_original_string >= len(lowercase_): raise ValueError( "The parameter idx_original_string must be lower than" " len(bwt_string).") SCREAMING_SNAKE_CASE : str = [""""""] * len(lowercase_) for _ in range(len(lowercase_)): for i in range(len(lowercase_)): SCREAMING_SNAKE_CASE : List[Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": a_ = """Provide a string that I will generate its BWT transform: """ a_ = input(entry_msg).strip() a_ = bwt_transform(s) print( F'''Burrows Wheeler transform for string \'{s}\' results ''' F'''in \'{result["bwt_string"]}\'''' ) a_ = reverse_bwt(result['bwt_string'], result['idx_original_string']) print( F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ''' F'''we get original string \'{original_string}\'''' )
25
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set.""" def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any: '''simple docstring''' lowercase__ : Any = Path(lowercase_ ) path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ ) if path.exists(): print( F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False lowercase__ : int = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) lowercase__ : Dict = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): lowercase__ : Any = torch.cuda.device_count() lowercase__ : Any = num_gpus lowercase__ : Optional[int] = False if num_gpus > 1: lowercase__ : Tuple = """MULTI_GPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_xpu_available() and use_xpu: lowercase__ : Union[str, Any] = torch.xpu.device_count() lowercase__ : str = num_xpus lowercase__ : List[Any] = False if num_xpus > 1: lowercase__ : str = """MULTI_XPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_npu_available(): lowercase__ : Tuple = torch.npu.device_count() lowercase__ : Union[str, Any] = num_npus lowercase__ : Union[str, Any] = False if num_npus > 1: lowercase__ : List[Any] = """MULTI_NPU""" else: lowercase__ : int = """NO""" else: lowercase__ : Union[str, Any] = 0 lowercase__ : str = True lowercase__ : Union[str, Any] = 1 lowercase__ : int = """NO""" lowercase__ : Tuple = ClusterConfig(**lowercase_ ) config.to_json_file(lowercase_ ) return path def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ ) parser.add_argument( """--config_file""" , default=lowercase_ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. """ """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=lowercase_ ) return parser def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'accelerate configuration saved at {config_file}' )
12
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = ['image_processor', 'tokenizer'] UpperCamelCase : Union[str, Any] = 'CLIPImageProcessor' UpperCamelCase : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :Union[str, Any] ) ->int: lowercase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , SCREAMING_SNAKE_CASE_ , ) lowercase = kwargs.pop("feature_extractor" ) lowercase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __call__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :List[Any]=None , **lowerCAmelCase__ :List[str] ) ->Dict: if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if images is not None: lowercase = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is not None and images is not None: lowercase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE( self :Tuple , *lowerCAmelCase__ :str , **lowerCAmelCase__ :Tuple ) ->List[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE( self :Any , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :List[str] ) ->int: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def SCREAMING_SNAKE_CASE( self :List[Any] ) ->Optional[int]: lowercase = self.tokenizer.model_input_names lowercase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE( self :Dict ) ->Tuple: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , SCREAMING_SNAKE_CASE_ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE( self :Dict ) ->List[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , SCREAMING_SNAKE_CASE_ , ) return self.image_processor
441
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Union[str, Any] = { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = 'convbert' def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : Dict = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Tuple = max_position_embeddings lowercase__ : Dict = type_vocab_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Dict = layer_norm_eps lowercase__ : Tuple = embedding_size lowercase__ : List[str] = head_ratio lowercase__ : Dict = conv_kernel_size lowercase__ : Dict = num_groups lowercase__ : int = classifier_dropout class _snake_case ( UpperCAmelCase_ ): @property def lowercase__ ( self): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ : str = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ])
12
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : str = logging.get_logger(__name__) A__ : int = { """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Dict = 'sew' def __init__( self , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_="group" , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="mean" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = hidden_size __lowerCamelCase : int = feat_extract_norm __lowerCamelCase : Optional[int] = feat_extract_activation __lowerCamelCase : Any = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = conv_bias __lowerCamelCase : Dict = num_conv_pos_embeddings __lowerCamelCase : Optional[Any] = num_conv_pos_embedding_groups __lowerCamelCase : Dict = len(self.conv_dim ) __lowerCamelCase : Optional[Any] = num_hidden_layers __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : List[Any] = squeeze_factor __lowerCamelCase : List[str] = hidden_act __lowerCamelCase : Dict = num_attention_heads __lowerCamelCase : Dict = hidden_dropout __lowerCamelCase : Tuple = attention_dropout __lowerCamelCase : Dict = activation_dropout __lowerCamelCase : Optional[int] = feat_proj_dropout __lowerCamelCase : Tuple = final_dropout __lowerCamelCase : str = layerdrop __lowerCamelCase : int = layer_norm_eps __lowerCamelCase : int = initializer_range __lowerCamelCase : Optional[int] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase : Optional[Any] = apply_spec_augment __lowerCamelCase : Tuple = mask_time_prob __lowerCamelCase : Any = mask_time_length __lowerCamelCase : int = mask_time_min_masks __lowerCamelCase : int = mask_feature_prob __lowerCamelCase : Dict = mask_feature_length __lowerCamelCase : List[str] = mask_feature_min_masks # ctc loss __lowerCamelCase : Any = ctc_loss_reduction __lowerCamelCase : List[Any] = ctc_zero_infinity # sequence classification __lowerCamelCase : List[Any] = use_weighted_layer_sum __lowerCamelCase : Union[str, Any] = classifier_proj_size @property def lowercase_ ( self ) -> List[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
13
'''simple docstring''' from __future__ import annotations import os from collections.abc import Mapping A__ : Optional[Any] = tuple[int, int] class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: __lowerCamelCase : set[int] = vertices __lowerCamelCase : dict[EdgeT, int] = { (min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )): weight for edge, weight in edges.items() } def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __lowerCamelCase : Union[str, Any] = weight def lowercase_ ( self ) -> Graph: __lowerCamelCase : Graph = Graph({min(self.vertices )} , {} ) __lowerCamelCase : EdgeT __lowerCamelCase : int __lowerCamelCase : EdgeT __lowerCamelCase : int while len(subgraph.vertices ) < len(self.vertices ): __lowerCamelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __lowerCamelCase : Optional[int] = edge __lowerCamelCase : List[str] = weight subgraph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return subgraph def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int: __lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) ) __lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : dict[EdgeT, int] = {} __lowerCamelCase : list[str] __lowerCamelCase : int __lowerCamelCase : int with open(UpperCAmelCase_ ) as f: __lowerCamelCase : Any = f.read().strip().split('\n' ) __lowerCamelCase : Any = [line.split(',' ) for line in data] for edgea in range(1 , len(UpperCAmelCase_ ) ): for edgea in range(UpperCAmelCase_ ): if adjaceny_matrix[edgea][edgea] != "-": __lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] ) __lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ ) __lowerCamelCase : Graph = graph.prims_algorithm() __lowerCamelCase : int = sum(graph.edges.values() ) __lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) A__ : List[str] = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys A__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' from collections.abc import Generator from math import sin def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: if len(UpperCAmelCase_ ) != 32: raise ValueError('Input must be of length 32' ) __lowerCamelCase : Dict = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bytes: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : Union[str, Any] = format(UpperCAmelCase_ , '08x' )[-8:] __lowerCamelCase : str = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = B'' for char in message: bit_string += format(UpperCAmelCase_ , '08b' ).encode('utf-8' ) __lowerCamelCase : List[str] = format(len(UpperCAmelCase_ ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCAmelCase_ ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> Generator[list[int], None, None]: if len(UpperCAmelCase_ ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(UpperCAmelCase_ ) , 5_12 ): __lowerCamelCase : Any = bit_string[pos : pos + 5_12] __lowerCamelCase : Optional[int] = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : List[Any] = format(UpperCAmelCase_ , '032b' ) __lowerCamelCase : Optional[int] = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCAmelCase_ , 2 ) def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: return (a + b) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = preprocess(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase : Dict = 0x67_45_23_01 __lowerCamelCase : Union[str, Any] = 0xef_cd_ab_89 __lowerCamelCase : Optional[Any] = 0x98_ba_dc_fe __lowerCamelCase : Union[str, Any] = 0x10_32_54_76 __lowerCamelCase : List[str] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCAmelCase_ ): __lowerCamelCase : Dict = aa __lowerCamelCase : Tuple = ba __lowerCamelCase : List[Any] = ca __lowerCamelCase : Dict = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase : List[str] = d ^ (b & (c ^ d)) __lowerCamelCase : Optional[int] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase : Optional[int] = c ^ (d & (b ^ c)) __lowerCamelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase : str = b ^ c ^ d __lowerCamelCase : Any = (3 * i + 5) % 16 else: __lowerCamelCase : Union[str, Any] = c ^ (b | not_aa(UpperCAmelCase_ )) __lowerCamelCase : int = (7 * i) % 16 __lowerCamelCase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase : Optional[Any] = d __lowerCamelCase : Tuple = c __lowerCamelCase : Optional[int] = b __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , left_rotate_aa(UpperCAmelCase_ , shift_amounts[i] ) ) # Add hashed chunk to running total __lowerCamelCase : int = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Dict = reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'char' lowerCamelCase : Dict = 'bpe' lowerCamelCase : int = 'wp' A__ : List[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = ['image_processor', 'char_tokenizer'] lowerCamelCase : int = 'ViTImageProcessor' lowerCamelCase : Any = 'MgpstrTokenizer' def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Dict = kwargs.pop('feature_extractor' ) __lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) __lowerCamelCase : List[Any] = tokenizer __lowerCamelCase : int = AutoTokenizer.from_pretrained('gpt2' ) __lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __lowerCamelCase : Tuple = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is not None: __lowerCamelCase : int = self.char_tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is None: return inputs elif images is None: return encodings else: __lowerCamelCase : Union[str, Any] = encodings['input_ids'] return inputs def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = sequences __lowerCamelCase : Optional[int] = char_preds.size(0 ) __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self._decode_helper(SCREAMING_SNAKE_CASE_ , 'char' ) __lowerCamelCase , __lowerCamelCase : str = self._decode_helper(SCREAMING_SNAKE_CASE_ , 'bpe' ) __lowerCamelCase , __lowerCamelCase : Tuple = self._decode_helper(SCREAMING_SNAKE_CASE_ , 'wp' ) __lowerCamelCase : Optional[Any] = [] __lowerCamelCase : Union[str, Any] = [] for i in range(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = [char_scores[i], bpe_scores[i], wp_scores[i]] __lowerCamelCase : str = [char_strs[i], bpe_strs[i], wp_strs[i]] __lowerCamelCase : Dict = scores.index(max(SCREAMING_SNAKE_CASE_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __lowerCamelCase : List[str] = {} __lowerCamelCase : Optional[Any] = final_strs __lowerCamelCase : Union[str, Any] = final_scores __lowerCamelCase : Optional[int] = char_strs __lowerCamelCase : Tuple = bpe_strs __lowerCamelCase : Optional[int] = wp_strs return out def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if format == DecodeType.CHARACTER: __lowerCamelCase : int = self.char_decode __lowerCamelCase : str = 1 __lowerCamelCase : Optional[int] = '[s]' elif format == DecodeType.BPE: __lowerCamelCase : Optional[Any] = self.bpe_decode __lowerCamelCase : int = 2 __lowerCamelCase : Union[str, Any] = '#' elif format == DecodeType.WORDPIECE: __lowerCamelCase : Optional[Any] = self.wp_decode __lowerCamelCase : Optional[Any] = 1_02 __lowerCamelCase : Any = '[SEP]' else: raise ValueError(f'Format {format} is not supported.' ) __lowerCamelCase , __lowerCamelCase : Optional[int] = [], [] __lowerCamelCase : Dict = pred_logits.size(0 ) __lowerCamelCase : str = pred_logits.size(1 ) __lowerCamelCase , __lowerCamelCase : List[Any] = pred_logits.topk(1 , dim=-1 , largest=SCREAMING_SNAKE_CASE_ , sorted=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = preds_index.view(-1 , SCREAMING_SNAKE_CASE_ )[:, 1:] __lowerCamelCase : Any = decoder(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase , __lowerCamelCase : Union[str, Any] = torch.nn.functional.softmax(SCREAMING_SNAKE_CASE_ , dim=2 ).max(dim=2 ) __lowerCamelCase : Dict = preds_max_prob[:, 1:] for index in range(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = preds_str[index].find(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = preds_str[index][:pred_eos] __lowerCamelCase : Tuple = preds_index[index].cpu().tolist() __lowerCamelCase : Any = pred_index.index(SCREAMING_SNAKE_CASE_ ) if eos_token in pred_index else -1 __lowerCamelCase : Union[str, Any] = preds_max_prob[index][: pred_eos_index + 1] __lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(SCREAMING_SNAKE_CASE_ ) conf_scores.append(SCREAMING_SNAKE_CASE_ ) return dec_strs, conf_scores def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Any = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )] return decode_strs def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Tuple: return self.bpe_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Any = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )] return decode_strs
13
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Dict = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = 'rwkv' lowerCamelCase : Any = {'max_position_embeddings': 'context_length'} def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : Tuple = context_length __lowerCamelCase : str = hidden_size __lowerCamelCase : List[str] = num_hidden_layers __lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCamelCase : Optional[Any] = layer_norm_epsilon __lowerCamelCase : int = rescale_every __lowerCamelCase : Tuple = use_cache __lowerCamelCase : int = bos_token_id __lowerCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' from __future__ import annotations A__ : Dict = 8.988e9 # units = N * m^s * C^-2 def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> dict[str, float]: __lowerCamelCase : Union[str, Any] = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if distance < 0: raise ValueError('Distance cannot be negative' ) if force == 0: __lowerCamelCase : Any = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __lowerCamelCase : str = abs(UpperCAmelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __lowerCamelCase : List[Any] = abs(UpperCAmelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __lowerCamelCase : Any = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase_ )) ** 0.5 return {"distance": distance} raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 10_00 ) -> int: __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path A__ : List[str] = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) A__ : List[str] = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} A__ : Tuple = """zero2""" A__ : str = """zero3""" A__ : int = [ZEROa, ZEROa] def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str ) -> int: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __lowerCamelCase : Dict = parameterized.to_safe_name('_'.join(str(UpperCAmelCase_ ) for x in param.args ) ) return F'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test A__ : Union[str, Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ) -> str: __lowerCamelCase : List[Any] = models[model] __lowerCamelCase : Union[str, Any] = self.run_trainer( stage=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , eval_steps=SCREAMING_SNAKE_CASE_ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) self.do_checks(SCREAMING_SNAKE_CASE_ ) return output_dir def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir('./xxx' , after=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(SCREAMING_SNAKE_CASE_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(['--fp16'] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __lowerCamelCase : str = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __lowerCamelCase : Any = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __lowerCamelCase : Optional[int] = self.get_launcher(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=self.get_env() ) return output_dir def lowercase_ ( self , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) __lowerCamelCase : List[Any] = min(2 , get_gpu_count() ) if distributed else 1 return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Dict = XGLMConfig lowerCamelCase : List[str] = {} lowerCamelCase : Union[str, Any] = 'gelu' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Any: __lowerCamelCase : int = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : Optional[Any] = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : str = use_input_mask __lowerCamelCase : Dict = use_labels __lowerCamelCase : Union[str, Any] = vocab_size __lowerCamelCase : List[Any] = d_model __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : Optional[Any] = ffn_dim __lowerCamelCase : List[Any] = activation_function __lowerCamelCase : List[Any] = activation_dropout __lowerCamelCase : List[Any] = attention_dropout __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : int = None __lowerCamelCase : int = 0 __lowerCamelCase : Tuple = 2 __lowerCamelCase : Tuple = 1 def lowercase_ ( self ) -> Any: return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __lowerCamelCase : Optional[int] = None if self.use_input_mask: __lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase : str = self.get_config() __lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase_ ( self ) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> str: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : str = config_and_inputs __lowerCamelCase : Union[str, Any] = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase : Any = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase : List[Any] = False lowerCamelCase : Dict = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : str = TFXGLMModelTester(self ) __lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 ) def lowercase_ ( self ) -> Dict: self.config_tester.run_common_tests() @slow def lowercase_ ( self ) -> Optional[int]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Optional[Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def lowercase_ ( self ) -> Any: super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self , SCREAMING_SNAKE_CASE_=True ) -> List[str]: __lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[int] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on __lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __lowerCamelCase : List[Any] = tokenizer('Today is a nice day and' , return_tensors='tf' ) __lowerCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __lowerCamelCase : Tuple = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , seed=[7, 0] ) __lowerCamelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = 'left' # use different length sentences to test batching __lowerCamelCase : Any = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : Any = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
13
1
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers A__ : Dict = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None ) -> List[Any]: require_version(deps[pkg] , UpperCAmelCase_ )
13
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] = logging.get_logger(__name__) # TODO Update this A__ : Tuple = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'esm' def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : str = num_hidden_layers __lowerCamelCase : List[str] = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Optional[int] = max_position_embeddings __lowerCamelCase : str = initializer_range __lowerCamelCase : Optional[int] = layer_norm_eps __lowerCamelCase : List[str] = position_embedding_type __lowerCamelCase : int = use_cache __lowerCamelCase : Optional[Any] = emb_layer_norm_before __lowerCamelCase : Optional[Any] = token_dropout __lowerCamelCase : str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) __lowerCamelCase : Dict = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) __lowerCamelCase : List[str] = get_default_vocab_list() else: __lowerCamelCase : Optional[Any] = vocab_list else: __lowerCamelCase : Dict = None __lowerCamelCase : Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' ) def lowercase_ ( self ) -> Any: __lowerCamelCase : Any = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : int = self.esmfold_config.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = None lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : float = 0 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : int = 1_2_8 lowerCamelCase : "TrunkConfig" = None def lowercase_ ( self ) -> Any: if self.trunk is None: __lowerCamelCase : List[str] = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = TrunkConfig(**self.trunk ) def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[int] = asdict(self ) __lowerCamelCase : str = self.trunk.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 4_8 lowerCamelCase : int = 1_0_2_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : float = 0 lowerCamelCase : float = 0 lowerCamelCase : bool = False lowerCamelCase : int = 4 lowerCamelCase : Optional[int] = 1_2_8 lowerCamelCase : "StructureModuleConfig" = None def lowercase_ ( self ) -> Optional[int]: if self.structure_module is None: __lowerCamelCase : Dict = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) __lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width __lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[str] = asdict(self ) __lowerCamelCase : int = self.structure_module.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 3_8_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_6 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_2 lowerCamelCase : int = 4 lowerCamelCase : int = 8 lowerCamelCase : float = 0.1 lowerCamelCase : int = 8 lowerCamelCase : int = 1 lowerCamelCase : int = 2 lowerCamelCase : int = 7 lowerCamelCase : int = 1_0 lowerCamelCase : float = 1e-8 lowerCamelCase : float = 1e5 def lowercase_ ( self ) -> Any: return asdict(self ) def UpperCAmelCase__ ( ) -> Optional[Any]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
13
1
'''simple docstring''' A__ : List[str] = [0, 2, 4, 6, 8] A__ : Union[str, Any] = [1, 3, 5, 7, 9] def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ) -> int: if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 __lowerCamelCase : Optional[Any] = 0 for digit in range(10 ): __lowerCamelCase : Optional[int] = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , UpperCAmelCase_ , UpperCAmelCase_ ) return result __lowerCamelCase : Optional[int] = 0 for digita in range(10 ): __lowerCamelCase : List[Any] = digita if (remainder + digita) % 2 == 0: __lowerCamelCase : int = ODD_DIGITS else: __lowerCamelCase : Union[str, Any] = EVEN_DIGITS for digita in other_parity_digits: __lowerCamelCase : Optional[int] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , UpperCAmelCase_ , UpperCAmelCase_ , ) return result def UpperCAmelCase__ ( UpperCAmelCase_ : int = 9 ) -> int: __lowerCamelCase : List[Any] = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(UpperCAmelCase_ , 0 , [0] * length , UpperCAmelCase_ ) return result if __name__ == "__main__": print(f'''{solution() = }''')
13
'''simple docstring''' A__ : dict[tuple[int, int, int], int] = {} def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on __lowerCamelCase : List[Any] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one __lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 __lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter __lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 ) __lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime __lowerCamelCase : Union[str, Any] = prizestrings return prizestrings def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int: return _calculate(UpperCAmelCase_ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
13
1
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__ : Dict = logging.get_logger(__name__) A__ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} A__ : Any = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } A__ : Tuple = { """allenai/longformer-base-4096""": 4096, """allenai/longformer-large-4096""": 4096, """allenai/longformer-large-4096-finetuned-triviaqa""": 4096, """allenai/longformer-base-4096-extra.pos.embd.only""": 4096, """allenai/longformer-large-4096-extra.pos.embd.only""": 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def UpperCAmelCase__ ( ) -> Optional[int]: __lowerCamelCase : Tuple = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) __lowerCamelCase : str = bs[:] __lowerCamelCase : Tuple = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCAmelCase_ ) cs.append(2**8 + n ) n += 1 __lowerCamelCase : Union[str, Any] = [chr(UpperCAmelCase_ ) for n in cs] return dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = set() __lowerCamelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase : List[str] = char return pairs class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = VOCAB_FILES_NAMES lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : str = ['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]: __lowerCamelCase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token __lowerCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token __lowerCamelCase : Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token __lowerCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token __lowerCamelCase : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token __lowerCamelCase : Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __lowerCamelCase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle: __lowerCamelCase : Optional[Any] = json.load(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = {v: k for k, v in self.encoder.items()} __lowerCamelCase : List[str] = errors # how to handle errors in decoding __lowerCamelCase : Dict = bytes_to_unicode() __lowerCamelCase : Optional[int] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle: __lowerCamelCase : Union[str, Any] = merges_handle.read().split('\n' )[1:-1] __lowerCamelCase : Tuple = [tuple(merge.split() ) for merge in bpe_merges] __lowerCamelCase : List[Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) __lowerCamelCase : Dict = {} __lowerCamelCase : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCamelCase : Tuple = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowercase_ ( self ) -> Dict: return len(self.encoder ) def lowercase_ ( self ) -> Union[str, Any]: return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict: if token in self.cache: return self.cache[token] __lowerCamelCase : str = tuple(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: __lowerCamelCase : str = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase : Dict = bigram __lowerCamelCase : List[str] = [] __lowerCamelCase : int = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: __lowerCamelCase : Optional[int] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCamelCase : int = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase : Tuple = tuple(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: __lowerCamelCase : str = get_pairs(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = ' '.join(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = word return word def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : str = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) ) return bpe_tokens def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowerCamelCase : Any = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' ) __lowerCamelCase : Dict = 0 with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!' ) __lowerCamelCase : List[Any] = token_index writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase : List[str] = [self.cls_token_id] __lowerCamelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: __lowerCamelCase : List[str] = [self.sep_token_id] __lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Dict = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): __lowerCamelCase : List[str] = ' ' + text return (text, kwargs)
13
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str lowerCamelCase : Optional[str] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None def lowercase_ ( self ) -> List[str]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> Any: return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def lowercase_ ( self ) -> int: return self.major, self.minor, self.patch def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return Version(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return other raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' ) def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: try: __lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) return self.tuple < other.tuple def __hash__( self ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase_ ( self ) -> str: return self.version_str def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str: __lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict: return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
13
1
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : int = image_size __lowerCamelCase : List[str] = patch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : Any = is_training __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : str = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 1 def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Optional[int] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (self.image_size, self.image_size) __lowerCamelCase : str = (self.patch_size, self.patch_size) __lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = self.type_sequence_label_size __lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowercase_ ( self ) -> None: __lowerCamelCase : str = FlaxViTModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[str] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : Any = [] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: return self.node_position[vertex] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = pos def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase : str = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase : Optional[Any] = 2 * start + 1 else: __lowerCamelCase : int = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase : int = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase : str = temp, tempa __lowerCamelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ ) self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = position[index] while index != 0: __lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase : Union[str, Any] = heap[parent] __lowerCamelCase : Any = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Tuple = val __lowerCamelCase : List[str] = temp self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) break __lowerCamelCase : Tuple = parent else: __lowerCamelCase : Union[str, Any] = val __lowerCamelCase : Tuple = temp self.set_position(SCREAMING_SNAKE_CASE_ , 0 ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Any = positions[0] __lowerCamelCase : Union[str, Any] = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return temp def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str: __lowerCamelCase : List[Any] = Heap() __lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ ) __lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase : Tuple = [] for vertex in range(len(UpperCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase_ ) heap.node_position.append(UpperCAmelCase_ ) __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 1 __lowerCamelCase : str = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase : Any = 0 __lowerCamelCase : Any = distance heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(1 , len(UpperCAmelCase_ ) ): __lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase : Union[str, Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase_ )] ): __lowerCamelCase : Dict = distance heap.bottom_to_top( UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A__ : Tuple = int(input("""Enter number of edges: """).strip()) A__ : str = defaultdict(list) for _ in range(edges_number): A__ : Optional[int] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : torch.FloatTensor class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ) -> Optional[Any]: super().__init__() __lowerCamelCase : int = num_attention_heads __lowerCamelCase : str = attention_head_dim __lowerCamelCase : Optional[int] = num_attention_heads * attention_head_dim __lowerCamelCase : Tuple = in_channels __lowerCamelCase : List[str] = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE_ , num_channels=SCREAMING_SNAKE_CASE_ , eps=1E-6 , affine=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 3. Define transformers blocks __lowerCamelCase : List[str] = nn.ModuleList( [ BasicTransformerBlock( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , double_self_attention=SCREAMING_SNAKE_CASE_ , norm_elementwise_affine=SCREAMING_SNAKE_CASE_ , ) for d in range(SCREAMING_SNAKE_CASE_ ) ] ) __lowerCamelCase : Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = hidden_states.shape __lowerCamelCase : Dict = batch_frames // num_frames __lowerCamelCase : Tuple = hidden_states __lowerCamelCase : Any = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __lowerCamelCase : List[str] = self.norm(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = self.proj_in(SCREAMING_SNAKE_CASE_ ) # 2. Blocks for block in self.transformer_blocks: __lowerCamelCase : List[Any] = block( SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ , ) # 3. Output __lowerCamelCase : Dict = self.proj_out(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = ( hidden_states[None, None, :] .reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __lowerCamelCase : List[Any] = hidden_states.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int: __lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6 __lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bool: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise ValueError('check_bouncy() accepts only integer arguments' ) __lowerCamelCase : Optional[int] = str(UpperCAmelCase_ ) __lowerCamelCase : List[str] = ''.join(sorted(UpperCAmelCase_ ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def UpperCAmelCase__ ( UpperCAmelCase_ : float = 99 ) -> int: if not 0 < percent < 1_00: raise ValueError('solution() only accepts values from 0 to 100' ) __lowerCamelCase : Tuple = 0 __lowerCamelCase : int = 1 while True: if check_bouncy(UpperCAmelCase_ ): bouncy_num += 1 if (bouncy_num / num) * 1_00 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
13
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : int = image_size __lowerCamelCase : List[str] = patch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : Any = is_training __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : str = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 1 def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Optional[int] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (self.image_size, self.image_size) __lowerCamelCase : str = (self.patch_size, self.patch_size) __lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = self.type_sequence_label_size __lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowercase_ ( self ) -> None: __lowerCamelCase : str = FlaxViTModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[str] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin A__ : int = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A__ : int = 250004 A__ : Union[str, Any] = 250020 @require_sentencepiece @require_tokenizers class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[str] = MBartaaTokenizer lowerCamelCase : Optional[int] = MBartaaTokenizerFast lowerCamelCase : str = True lowerCamelCase : Any = True def lowercase_ ( self ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase : List[str] = MBartaaTokenizer(SCREAMING_SNAKE_CASE_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self ) -> str: __lowerCamelCase : List[Any] = '<s>' __lowerCamelCase : Optional[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 10_54 ) def lowercase_ ( self ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[Any] = MBartaaTokenizer(SCREAMING_SNAKE_CASE_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __lowerCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def lowercase_ ( self ) -> int: # fmt: off __lowerCamelCase : str = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def lowercase_ ( self ) -> Optional[int]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __lowerCamelCase : List[Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = tempfile.mkdtemp() __lowerCamelCase : Dict = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) __lowerCamelCase : Union[str, Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way __lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=True __lowerCamelCase : str = tempfile.mkdtemp() __lowerCamelCase : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way __lowerCamelCase : Tuple = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=False __lowerCamelCase : int = tempfile.mkdtemp() __lowerCamelCase : str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __lowerCamelCase : Tuple = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" lowerCamelCase : List[str] = 'facebook/mbart-large-50-one-to-many-mmt' lowerCamelCase : Dict = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowerCamelCase : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowerCamelCase : List[Any] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def lowercase_ ( cls ) -> str: __lowerCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) __lowerCamelCase : List[Any] = 1 return cls def lowercase_ ( self ) -> str: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 ) def lowercase_ ( self ) -> Any: __lowerCamelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Dict: self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids ) __lowerCamelCase : List[str] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __lowerCamelCase : Union[str, Any] = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Dict: __lowerCamelCase : Optional[Any] = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = 10 __lowerCamelCase : Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0] self.assertEqual(ids[0] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : str = tempfile.mkdtemp() __lowerCamelCase : List[str] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = MBartaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ ) @require_torch def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) __lowerCamelCase : Any = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) __lowerCamelCase : Tuple = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __lowerCamelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Dict = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' ) __lowerCamelCase : Optional[Any] = self.tokenizer( text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' ) __lowerCamelCase : Optional[Any] = targets['input_ids'] __lowerCamelCase : List[str] = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase_ ( self ) -> int: __lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , { # en_XX, A, test, EOS 'input_ids': [[25_00_04, 62, 30_34, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_00_01, } , )
13
'''simple docstring''' import argparse A__ : Optional[Any] = """docs/source/_static/js/custom.js""" def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int: with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f: __lowerCamelCase : Dict = f.readlines() __lowerCamelCase : Tuple = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 __lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += F' "v{version}": "v{version}",\n' with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase_ ) if __name__ == "__main__": A__ : str = argparse.ArgumentParser() parser.add_argument("""--version""", help="""Release version.""") A__ : Any = parser.parse_args() update_custom_js(args.version)
13
1
'''simple docstring''' from functools import reduce A__ : Union[str, Any] = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def UpperCAmelCase__ ( UpperCAmelCase_ : str = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCAmelCase_ , UpperCAmelCase_ : str(int(UpperCAmelCase_ ) * int(UpperCAmelCase_ ) ) , n[i : i + 13] ) ) for i in range(len(UpperCAmelCase_ ) - 12 ) ) if __name__ == "__main__": print(f'''{solution() = }''')
13
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape __lowerCamelCase : Dict = jax.image.resize( SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) __lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> List[str]: __lowerCamelCase : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int = None lowerCamelCase : float = 0.0 lowerCamelCase : bool = None lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels __lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : Tuple = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype ) __lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : int = nn.Dropout(self.dropout_prob ) __lowerCamelCase : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __lowerCamelCase : List[Any] = None if use_nin_shortcut: __lowerCamelCase : Any = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple: __lowerCamelCase : List[Any] = hidden_states __lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 ) __lowerCamelCase : Optional[int] = hidden_states + temb __lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ ) if self.conv_shortcut is not None: __lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ ) return hidden_states + residual
13
1
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ) -> int: # load base model __lowerCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __lowerCamelCase : List[str] = load_file(UpperCAmelCase_ ) __lowerCamelCase : Dict = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __lowerCamelCase : Optional[int] = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) __lowerCamelCase : Union[str, Any] = pipeline.text_encoder else: __lowerCamelCase : int = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) __lowerCamelCase : Any = pipeline.unet # find the target layer __lowerCamelCase : List[Any] = layer_infos.pop(0 ) while len(UpperCAmelCase_ ) > -1: try: __lowerCamelCase : Optional[int] = curr_layer.__getattr__(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: __lowerCamelCase : str = layer_infos.pop(0 ) elif len(UpperCAmelCase_ ) == 0: break except Exception: if len(UpperCAmelCase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __lowerCamelCase : Dict = layer_infos.pop(0 ) __lowerCamelCase : Optional[int] = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(UpperCAmelCase_ ) else: pair_keys.append(UpperCAmelCase_ ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __lowerCamelCase : List[Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __lowerCamelCase : Optional[int] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase_ , UpperCAmelCase_ ).unsqueeze(2 ).unsqueeze(3 ) else: __lowerCamelCase : Optional[int] = state_dict[pair_keys[0]].to(torch.floataa ) __lowerCamelCase : Any = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase_ , UpperCAmelCase_ ) # update visited list for item in pair_keys: visited.append(UpperCAmelCase_ ) return pipeline if __name__ == "__main__": A__ : List[str] = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.7_5, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") A__ : Optional[int] = parser.parse_args() A__ : Tuple = args.base_model_path A__ : List[Any] = args.checkpoint_path A__ : Union[str, Any] = args.dump_path A__ : List[Any] = args.lora_prefix_unet A__ : Tuple = args.lora_prefix_text_encoder A__ : Tuple = args.alpha A__ : Tuple = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) A__ : List[Any] = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
13
'''simple docstring''' from __future__ import annotations A__ : int = 10 def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> list[int]: __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Any = max(UpperCAmelCase_ ) while placement <= max_digit: # declare and initialize empty buckets __lowerCamelCase : list[list] = [[] for _ in range(UpperCAmelCase_ )] # split list_of_ints between the buckets for i in list_of_ints: __lowerCamelCase : List[Any] = int((i / placement) % RADIX ) buckets[tmp].append(UpperCAmelCase_ ) # put each buckets' contents into list_of_ints __lowerCamelCase : Tuple = 0 for b in range(UpperCAmelCase_ ): for i in buckets[b]: __lowerCamelCase : List[Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar A__ : Optional[Any] = TypeVar("""T""") def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: return (position - 1) // 2 def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: return (2 * position) + 1 def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: return (2 * position) + 2 class UpperCAmelCase_ (Generic[T] ): """simple docstring""" def __init__( self ) -> None: __lowerCamelCase : list[tuple[T, int]] = [] __lowerCamelCase : dict[T, int] = {} __lowerCamelCase : int = 0 def __len__( self ) -> int: return self.elements def __repr__( self ) -> str: return str(self.heap ) def lowercase_ ( self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __lowerCamelCase : Any = self.elements self.elements += 1 self._bubble_up(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __lowerCamelCase , __lowerCamelCase : int = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.heap[0] self._bubble_down(SCREAMING_SNAKE_CASE_ ) return elem def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: # Update the weight of the given key __lowerCamelCase : Union[str, Any] = self.position_map[elem] __lowerCamelCase : Any = (elem, weight) if position > 0: __lowerCamelCase : Union[str, Any] = get_parent_position(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase , __lowerCamelCase : List[str] = self.heap[parent_position] if parent_weight > weight: self._bubble_up(SCREAMING_SNAKE_CASE_ ) else: self._bubble_down(SCREAMING_SNAKE_CASE_ ) else: self._bubble_down(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __lowerCamelCase : Tuple = self.position_map[elem] if curr_pos == 0: return None __lowerCamelCase : Dict = get_parent_position(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase , __lowerCamelCase : int = self.heap[curr_pos] __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return self._bubble_up(SCREAMING_SNAKE_CASE_ ) return None def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __lowerCamelCase : List[str] = self.position_map[elem] __lowerCamelCase , __lowerCamelCase : int = self.heap[curr_pos] __lowerCamelCase : Union[str, Any] = get_child_left_position(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = get_child_right_position(SCREAMING_SNAKE_CASE_ ) if child_left_position < self.elements and child_right_position < self.elements: __lowerCamelCase , __lowerCamelCase : Dict = self.heap[child_left_position] __lowerCamelCase , __lowerCamelCase : List[Any] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return self._bubble_down(SCREAMING_SNAKE_CASE_ ) if child_left_position < self.elements: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return self._bubble_down(SCREAMING_SNAKE_CASE_ ) else: return None if child_right_position < self.elements: __lowerCamelCase , __lowerCamelCase : List[str] = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return self._bubble_down(SCREAMING_SNAKE_CASE_ ) return None def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: # Swap the nodes at the given positions __lowerCamelCase : List[Any] = self.heap[nodea_pos][0] __lowerCamelCase : Tuple = self.heap[nodea_pos][0] __lowerCamelCase , __lowerCamelCase : Optional[Any] = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __lowerCamelCase : int = nodea_pos __lowerCamelCase : Optional[Any] = nodea_pos class UpperCAmelCase_ (Generic[T] ): """simple docstring""" def __init__( self ) -> None: __lowerCamelCase : dict[T, dict[T, int]] = {} __lowerCamelCase : int = 0 def __repr__( self ) -> str: return str(self.connections ) def __len__( self ) -> int: return self.nodes def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __lowerCamelCase : Optional[Any] = {} self.nodes += 1 def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: # Add an edge between 2 nodes in the graph self.add_node(SCREAMING_SNAKE_CASE_ ) self.add_node(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = weight __lowerCamelCase : Dict = weight def UpperCAmelCase__ ( UpperCAmelCase_ : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: __lowerCamelCase : dict[T, int] = {node: maxsize for node in graph.connections} __lowerCamelCase : dict[T, T | None] = {node: None for node in graph.connections} __lowerCamelCase : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(UpperCAmelCase_ , UpperCAmelCase_ ) if priority_queue.is_empty(): return dist, parent # initialization __lowerCamelCase : List[Any] = priority_queue.extract_min() __lowerCamelCase : Union[str, Any] = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __lowerCamelCase : int = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(UpperCAmelCase_ , dist[neighbour] ) __lowerCamelCase : int = node # running prim's algorithm while not priority_queue.is_empty(): __lowerCamelCase : List[Any] = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __lowerCamelCase : int = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(UpperCAmelCase_ , dist[neighbour] ) __lowerCamelCase : Union[str, Any] = node return dist, parent
13
'''simple docstring''' from collections import defaultdict from math import gcd def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_50_00_00 ) -> int: __lowerCamelCase : defaultdict = defaultdict(UpperCAmelCase_ ) __lowerCamelCase : Any = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , UpperCAmelCase_ , 2 ): if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) > 1: continue __lowerCamelCase : Any = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(UpperCAmelCase_ , limit + 1 , UpperCAmelCase_ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : """simple docstring""" @staticmethod def lowercase_ ( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any: pass @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Dict = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 ) self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 ) for detected_object in outputs: self.assertEqual( SCREAMING_SNAKE_CASE_ , { 'score': ANY(SCREAMING_SNAKE_CASE_ ), 'label': ANY(SCREAMING_SNAKE_CASE_ ), 'box': {'xmin': ANY(SCREAMING_SNAKE_CASE_ ), 'ymin': ANY(SCREAMING_SNAKE_CASE_ ), 'xmax': ANY(SCREAMING_SNAKE_CASE_ ), 'ymax': ANY(SCREAMING_SNAKE_CASE_ )}, } , ) import datasets __lowerCamelCase : Any = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) __lowerCamelCase : str = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] __lowerCamelCase : Any = object_detector(SCREAMING_SNAKE_CASE_ , threshold=0.0 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for outputs in batch_outputs: self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 ) for detected_object in outputs: self.assertEqual( SCREAMING_SNAKE_CASE_ , { 'score': ANY(SCREAMING_SNAKE_CASE_ ), 'label': ANY(SCREAMING_SNAKE_CASE_ ), 'box': {'xmin': ANY(SCREAMING_SNAKE_CASE_ ), 'ymin': ANY(SCREAMING_SNAKE_CASE_ ), 'xmax': ANY(SCREAMING_SNAKE_CASE_ ), 'ymax': ANY(SCREAMING_SNAKE_CASE_ )}, } , ) @require_tf @unittest.skip('Object detection not implemented in TF' ) def lowercase_ ( self ) -> Any: pass @require_torch def lowercase_ ( self ) -> str: __lowerCamelCase : Union[str, Any] = 'hf-internal-testing/tiny-detr-mobilenetsv3' __lowerCamelCase : Optional[Any] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ] , ) __lowerCamelCase : Tuple = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ], [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ], ] , ) @require_torch @slow def lowercase_ ( self ) -> Dict: __lowerCamelCase : int = 'facebook/detr-resnet-50' __lowerCamelCase : Dict = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) __lowerCamelCase : Dict = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], ] , ) @require_torch @slow def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Optional[int] = 'facebook/detr-resnet-50' __lowerCamelCase : str = pipeline('object-detection' , model=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) __lowerCamelCase : str = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], ] , ) @require_torch @slow def lowercase_ ( self ) -> List[str]: __lowerCamelCase : List[Any] = 0.9_9_8_5 __lowerCamelCase : Optional[int] = 'facebook/detr-resnet-50' __lowerCamelCase : int = pipeline('object-detection' , model=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=SCREAMING_SNAKE_CASE_ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) @require_torch @require_pytesseract @slow def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = 'Narsil/layoutlmv3-finetuned-funsd' __lowerCamelCase : Dict = 0.9_9_9_3 __lowerCamelCase : int = pipeline('object-detection' , model=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [ {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}}, {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}}, ] , )
13
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer A__ : str = logging.get_logger(__name__) A__ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} A__ : Tuple = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } A__ : str = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } A__ : Tuple = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION lowerCamelCase : Dict = RoFormerTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case or pre_tok_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents ): __lowerCamelCase : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) ) __lowerCamelCase : Union[str, Any] = do_lower_case __lowerCamelCase : str = strip_accents __lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = do_lower_case def __getstate__( self ) -> List[str]: __lowerCamelCase : Union[str, Any] = self.__dict__.copy() __lowerCamelCase : Dict = BertPreTokenizer() return state def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Optional[int] = d __lowerCamelCase : List[Any] = self.__dict__['_tokenizer'].get_vocab() __lowerCamelCase : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE_ ) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: __lowerCamelCase : List[str] = [self.sep_token_id] __lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: __lowerCamelCase : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Any: __lowerCamelCase : Tuple = BertPreTokenizer() return super().save_pretrained(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset A__ : Any = random.Random() def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[Any]=None ) -> Union[str, Any]: if rng is None: __lowerCamelCase : Any = global_rng __lowerCamelCase : List[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=20_00 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_41_00 , ) -> str: __lowerCamelCase : Dict = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : Any = min_seq_length __lowerCamelCase : List[Any] = max_seq_length __lowerCamelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCamelCase : Any = spectrogram_length __lowerCamelCase : Any = feature_size __lowerCamelCase : List[str] = num_audio_channels __lowerCamelCase : Dict = hop_length __lowerCamelCase : Optional[Any] = chunk_length __lowerCamelCase : Optional[Any] = sampling_rate def lowercase_ ( self ) -> List[str]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowercase_ ( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> Dict: def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) ) if equal_length: __lowerCamelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCamelCase : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCamelCase : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Dict = TvltFeatureExtractor def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Any = TvltFeatureExtractionTester(self ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'spectrogram_length' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'feature_size' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_audio_channels' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'hop_length' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'chunk_length' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'sampling_rate' ) ) def lowercase_ ( self ) -> Dict: __lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase : List[str] = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0] check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = feat_extract_first.to_dict() __lowerCamelCase : Optional[int] = feat_extract_second.to_dict() __lowerCamelCase : Union[str, Any] = dict_first.pop('mel_filters' ) __lowerCamelCase : List[Any] = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , 'feat_extract.json' ) feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = feat_extract_first.to_dict() __lowerCamelCase : Optional[Any] = feat_extract_second.to_dict() __lowerCamelCase : Union[str, Any] = dict_first.pop('mel_filters' ) __lowerCamelCase : str = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[str]: # Initialize feature_extractor __lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 __lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs] # Test not batched input __lowerCamelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched __lowerCamelCase : Any = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking __lowerCamelCase : Dict = feature_extractor( SCREAMING_SNAKE_CASE_ , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=SCREAMING_SNAKE_CASE_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. __lowerCamelCase : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __lowerCamelCase : int = np.asarray(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Union[str, Any] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech __lowerCamelCase : Union[str, Any] = ds.sort('id' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowercase_ ( self ) -> Dict: __lowerCamelCase : Tuple = self._load_datasamples(1 ) __lowerCamelCase : List[Any] = TvltFeatureExtractor() __lowerCamelCase : List[str] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) __lowerCamelCase : str = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer A__ : int = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast A__ : Dict = TaTokenizerFast A__ : Dict = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys A__ : Union[str, Any] = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
13
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Any = KandinskyVaaInpaintPipeline lowerCamelCase : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image'] lowerCamelCase : List[Any] = [ 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] lowerCamelCase : Union[str, Any] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] lowerCamelCase : str = False @property def lowercase_ ( self ) -> Union[str, Any]: return 32 @property def lowercase_ ( self ) -> str: return 32 @property def lowercase_ ( self ) -> Tuple: return self.time_input_dim @property def lowercase_ ( self ) -> Optional[Any]: return self.time_input_dim * 4 @property def lowercase_ ( self ) -> Optional[Any]: return 1_00 @property def lowercase_ ( self ) -> Any: torch.manual_seed(0 ) __lowerCamelCase : str = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ ) return model @property def lowercase_ ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase_ ( self ) -> Optional[int]: torch.manual_seed(0 ) __lowerCamelCase : List[Any] = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self ) -> Any: __lowerCamelCase : List[str] = self.dummy_unet __lowerCamelCase : str = self.dummy_movq __lowerCamelCase : List[str] = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='epsilon' , thresholding=SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[Any] = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Tuple: __lowerCamelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( SCREAMING_SNAKE_CASE_ ) # create init_image __lowerCamelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase : Optional[int] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((2_56, 2_56) ) # create mask __lowerCamelCase : Optional[Any] = np.ones((64, 64) , dtype=np.floataa ) __lowerCamelCase : Dict = 0 if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): __lowerCamelCase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def lowercase_ ( self ) -> str: __lowerCamelCase : List[str] = 'cpu' __lowerCamelCase : int = self.get_dummy_components() __lowerCamelCase : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : List[str] = output.images __lowerCamelCase : Dict = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0] __lowerCamelCase : int = image[0, -3:, -3:, -1] __lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] print(f'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) __lowerCamelCase : Dict = np.array( [0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def lowercase_ ( self ) -> List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) __lowerCamelCase : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __lowerCamelCase : str = np.ones((7_68, 7_68) , dtype=np.floataa ) __lowerCamelCase : str = 0 __lowerCamelCase : Tuple = 'a hat' __lowerCamelCase : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) __lowerCamelCase : Optional[Any] = pipeline.to(SCREAMING_SNAKE_CASE_ ) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = torch.Generator(device='cpu' ).manual_seed(0 ) __lowerCamelCase , __lowerCamelCase : str = pipe_prior( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __lowerCamelCase : Optional[Any] = pipeline( image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , ) __lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase_ (tf.keras.optimizers.schedules.LearningRateSchedule ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Any: super().__init__() __lowerCamelCase : Optional[Any] = initial_learning_rate __lowerCamelCase : Optional[Any] = warmup_steps __lowerCamelCase : Union[str, Any] = power __lowerCamelCase : Optional[int] = decay_schedule_fn __lowerCamelCase : Any = name def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCamelCase : str = tf.cast(SCREAMING_SNAKE_CASE_ , tf.floataa ) __lowerCamelCase : Optional[int] = tf.cast(self.warmup_steps , tf.floataa ) __lowerCamelCase : List[Any] = global_step_float / warmup_steps_float __lowerCamelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(SCREAMING_SNAKE_CASE_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> Optional[Any]: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 0.9 , UpperCAmelCase_ : float = 0.999 , UpperCAmelCase_ : float = 1e-8 , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : Optional[List[str]] = None , ) -> int: __lowerCamelCase : int = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCAmelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase_ , ) if num_warmup_steps: __lowerCamelCase : str = WarmUp( initial_learning_rate=UpperCAmelCase_ , decay_schedule_fn=UpperCAmelCase_ , warmup_steps=UpperCAmelCase_ , ) if weight_decay_rate > 0.0: __lowerCamelCase : List[Any] = AdamWeightDecay( learning_rate=UpperCAmelCase_ , weight_decay_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=UpperCAmelCase_ , ) else: __lowerCamelCase : Tuple = tf.keras.optimizers.Adam( learning_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ = 0.0_0_1 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.9_9_9 , SCREAMING_SNAKE_CASE_ = 1E-7 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "AdamWeightDecay" , **SCREAMING_SNAKE_CASE_ , ) -> int: super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = weight_decay_rate __lowerCamelCase : str = include_in_weight_decay __lowerCamelCase : List[Any] = exclude_from_weight_decay @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase : Any = {'WarmUp': WarmUp} return super(SCREAMING_SNAKE_CASE_ , cls ).from_config(SCREAMING_SNAKE_CASE_ , custom_objects=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: super(SCREAMING_SNAKE_CASE_ , self )._prepare_local(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Tuple = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = list(zip(*SCREAMING_SNAKE_CASE_ ) ) return super(SCREAMING_SNAKE_CASE_ , self ).apply_gradients(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , name=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCamelCase : Optional[int] = apply_state or {} __lowerCamelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCamelCase : List[Any] = self._fallback_apply_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase , __lowerCamelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_dense(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_sparse(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Any = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return False return True class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self ) -> Tuple: __lowerCamelCase : Tuple = [] __lowerCamelCase : Optional[Any] = None @property def lowercase_ ( self ) -> List[str]: if self._accum_steps is None: __lowerCamelCase : Tuple = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowercase_ ( self ) -> List[str]: if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: if not self._gradients: __lowerCamelCase : List[str] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(SCREAMING_SNAKE_CASE_ ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(SCREAMING_SNAKE_CASE_ ) != len(self._gradients ): raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(SCREAMING_SNAKE_CASE_ )}' ) for accum_gradient, gradient in zip(self._gradients , SCREAMING_SNAKE_CASE_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(SCREAMING_SNAKE_CASE_ ) self._accum_steps.assign_add(1 ) def lowercase_ ( self ) -> int: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(SCREAMING_SNAKE_CASE_ ) )
13
1
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> str: if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) __lowerCamelCase : int = str(bin(UpperCAmelCase_ ) )[2:] # remove the leading "0b" __lowerCamelCase : Any = str(bin(UpperCAmelCase_ ) )[2:] # remove the leading "0b" __lowerCamelCase : List[str] = max(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase_ ) , b_binary.zfill(UpperCAmelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE_=[1, 2, 3] , ) -> Any: __lowerCamelCase : Optional[Any] = parent __lowerCamelCase : int = batch_size __lowerCamelCase : Optional[int] = image_size __lowerCamelCase : Optional[int] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : List[Any] = depths __lowerCamelCase : int = num_heads __lowerCamelCase : Optional[Any] = window_size __lowerCamelCase : Optional[Any] = mlp_ratio __lowerCamelCase : List[str] = qkv_bias __lowerCamelCase : List[str] = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate __lowerCamelCase : Any = hidden_act __lowerCamelCase : Union[str, Any] = use_absolute_embeddings __lowerCamelCase : Any = patch_norm __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : Dict = is_training __lowerCamelCase : Optional[Any] = scope __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[str] = type_sequence_label_size __lowerCamelCase : Dict = encoder_stride __lowerCamelCase : Union[str, Any] = out_features __lowerCamelCase : str = out_indices def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[str] = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Optional[int]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Dict = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __lowerCamelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : str = ['stem'] __lowerCamelCase : Optional[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase : int = False lowerCamelCase : int = False lowerCamelCase : str = False lowerCamelCase : int = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self ) __lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def lowercase_ ( self ) -> int: pass def lowercase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self ) -> Tuple: return def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[str]: __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ ) @unittest.skip('Swin does not use inputs_embeds' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('Swin does not support feedforward chunking' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : str = [*signature.parameters.keys()] __lowerCamelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def lowercase_ ( self ) -> List[Any]: pass def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : int = outputs.hidden_states __lowerCamelCase : Tuple = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # Swin has a different seq_length __lowerCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __lowerCamelCase : Dict = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Optional[int] = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __lowerCamelCase : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __lowerCamelCase : str = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Tuple = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = 0 return t def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ): with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple() def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:' f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has' f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.' ) , ) recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for model_class in self.all_model_classes: __lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) __lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ (unittest.TestCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase : List[str] = MaskFormerSwinConfig def lowercase_ ( self ) -> Tuple: __lowerCamelCase : List[str] = MaskFormerSwinModelTester(self ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ ) backbone.to(SCREAMING_SNAKE_CASE_ ) backbone.eval() __lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.attentions )
13
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : str = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) __lowerCamelCase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) sd_pipe.set_scheduler('sample_euler' ) __lowerCamelCase : int = 'A painting of a squirrel eating a burger' __lowerCamelCase : Tuple = torch.manual_seed(0 ) __lowerCamelCase : Tuple = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) __lowerCamelCase : Union[str, Any] = output.images __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase : List[str] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __lowerCamelCase : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) sd_pipe.set_scheduler('sample_euler' ) __lowerCamelCase : Union[str, Any] = 'A painting of a squirrel eating a burger' __lowerCamelCase : Any = torch.manual_seed(0 ) __lowerCamelCase : List[str] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) __lowerCamelCase : List[str] = output.images __lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase : Tuple = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __lowerCamelCase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) __lowerCamelCase : Any = 'A painting of a squirrel eating a burger' __lowerCamelCase : Optional[Any] = torch.manual_seed(0 ) __lowerCamelCase : Optional[int] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[Any] = output.images __lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase : List[Any] = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
13
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers A__ : Dict = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None ) -> List[Any]: require_version(deps[pkg] , UpperCAmelCase_ )
13
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Any = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Dict = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : str = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> str: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Dict = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : str = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : int = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Dict = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[int] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Any = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[int] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[int] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : int = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Any = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: requires_backends(self , ['sentencepiece'] ) class UpperCAmelCase_ (metaclass=_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[str] = ['sentencepiece'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: requires_backends(self , ['sentencepiece'] )
13
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys A__ : List[str] = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
13
1
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") A__ : str = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) lowerCamelCase : Optional[str] = field( default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , ) lowerCamelCase : int = field( default=1_0_2_4 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCamelCase : bool = field( default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) lowerCamelCase : bool = field( default=_UpperCAmelCase , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) lowerCamelCase : Optional[int] = field( default=_UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) lowerCamelCase : Optional[int] = field( default=_UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) lowerCamelCase : Optional[int] = field( default=_UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) lowerCamelCase : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} ) lowerCamelCase : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} ) lowerCamelCase : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} ) def lowercase_ ( self ) -> Tuple: if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' ) else: __lowerCamelCase : Dict = self.train_file.split('.' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __lowerCamelCase : Tuple = self.validation_file.split('.' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = field( default=_UpperCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) lowerCamelCase : bool = field( default=_UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) lowerCamelCase : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) lowerCamelCase : bool = field( default=_UpperCAmelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) def UpperCAmelCase__ ( ) -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) __lowerCamelCase : List[str] = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase_ ) datasets.utils.logging.set_verbosity(UpperCAmelCase_ ) transformers.utils.logging.set_verbosity(UpperCAmelCase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowerCamelCase : List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCamelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCamelCase : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __lowerCamelCase : Dict = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __lowerCamelCase : Optional[int] = data_args.train_file.split('.' )[-1] __lowerCamelCase : List[str] = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __lowerCamelCase : Dict = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files __lowerCamelCase : str = load_dataset('csv' , data_files=UpperCAmelCase_ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __lowerCamelCase : Tuple = load_dataset('json' , data_files=UpperCAmelCase_ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __lowerCamelCase : Dict = raw_datasets['train'].features['label'].names __lowerCamelCase : str = len(UpperCAmelCase_ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __lowerCamelCase : Any = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=UpperCAmelCase_ , ) __lowerCamelCase : Union[str, Any] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __lowerCamelCase : Any = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCamelCase : Dict = False # Some models have set the order of the labels to use, so let's make sure we do use it. __lowerCamelCase : Optional[int] = {'Refused': 0, 'Entailed': 1} __lowerCamelCase : int = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __lowerCamelCase : Dict = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(UpperCAmelCase_ : Optional[Any] ): # Tokenize the texts def _convert_table_text_to_pandas(UpperCAmelCase_ : Dict ): __lowerCamelCase : Dict = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] __lowerCamelCase : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __lowerCamelCase : Union[str, Any] = examples['statement'] __lowerCamelCase : List[str] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) __lowerCamelCase : int = tokenizer(UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): __lowerCamelCase : Any = raw_datasets.map( UpperCAmelCase_ , batched=UpperCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) __lowerCamelCase : Tuple = raw_datasets['train'] if data_args.max_train_samples is not None: __lowerCamelCase : Optional[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) __lowerCamelCase : Optional[Any] = raw_datasets['validation'] if data_args.max_eval_samples is not None: __lowerCamelCase : Optional[Any] = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) __lowerCamelCase : Union[str, Any] = raw_datasets['test'] if data_args.max_predict_samples is not None: __lowerCamelCase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(UpperCAmelCase_ ) ) , 3 ): logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(UpperCAmelCase_ : EvalPrediction ): __lowerCamelCase : Any = p.predictions[0] if isinstance(p.predictions , UpperCAmelCase_ ) else p.predictions __lowerCamelCase : Optional[Any] = np.argmax(UpperCAmelCase_ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCamelCase : Any = default_data_collator elif training_args.fpaa: __lowerCamelCase : Dict = DataCollatorWithPadding(UpperCAmelCase_ , pad_to_multiple_of=8 ) else: __lowerCamelCase : List[Any] = None # Initialize our Trainer __lowerCamelCase : Union[str, Any] = Trainer( model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , ) # Training if training_args.do_train: __lowerCamelCase : int = None if training_args.resume_from_checkpoint is not None: __lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCamelCase : str = last_checkpoint __lowerCamelCase : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase_ ) __lowerCamelCase : List[str] = train_result.metrics __lowerCamelCase : List[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase_ ) ) __lowerCamelCase : List[str] = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , UpperCAmelCase_ ) trainer.save_metrics('train' , UpperCAmelCase_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowerCamelCase : Union[str, Any] = trainer.evaluate(eval_dataset=UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase_ ) __lowerCamelCase : int = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) trainer.log_metrics('eval' , UpperCAmelCase_ ) trainer.save_metrics('eval' , UpperCAmelCase_ ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __lowerCamelCase : Optional[int] = predict_dataset.remove_columns('label' ) __lowerCamelCase : List[str] = trainer.predict(UpperCAmelCase_ , metric_key_prefix='predict' ).predictions __lowerCamelCase : Dict = np.argmax(UpperCAmelCase_ , axis=1 ) __lowerCamelCase : Any = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(UpperCAmelCase_ , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(UpperCAmelCase_ ): __lowerCamelCase : int = label_list[item] writer.write(F'{index}\t{item}\n' ) __lowerCamelCase : Dict = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase_ ) else: trainer.create_model_card(**UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
13
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""") def UpperCAmelCase__ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) ) A__ : str = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
13
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() A__ : Union[str, Any] = logging.get_logger(__name__) A__ : List[str] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } A__ : str = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> Tuple: __lowerCamelCase : Tuple = {} with open(UpperCAmelCase_ , 'r' ) as file: for line_number, line in enumerate(UpperCAmelCase_ ): __lowerCamelCase : Any = line.strip() if line: __lowerCamelCase : Optional[int] = line.split() __lowerCamelCase : List[Any] = line_number __lowerCamelCase : int = words[0] __lowerCamelCase : List[str] = value return result def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ) -> Dict: for attribute in key.split('.' ): __lowerCamelCase : Optional[int] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase_ ): __lowerCamelCase : Dict = PARAM_MAPPING[full_name.split('.' )[-1]] __lowerCamelCase : str = 'param' if weight_type is not None and weight_type != "param": __lowerCamelCase : List[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape elif weight_type is not None and weight_type == "param": __lowerCamelCase : Any = hf_pointer for attribute in hf_param_name.split('.' ): __lowerCamelCase : Optional[int] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = shape_pointer.shape # let's reduce dimension __lowerCamelCase : Optional[int] = value[0] else: __lowerCamelCase : List[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": __lowerCamelCase : List[str] = value elif weight_type == "weight_g": __lowerCamelCase : Tuple = value elif weight_type == "weight_v": __lowerCamelCase : List[Any] = value elif weight_type == "bias": __lowerCamelCase : Any = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): __lowerCamelCase : Dict = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = value else: __lowerCamelCase : Tuple = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int ) -> Dict: __lowerCamelCase : Dict = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase_ ): __lowerCamelCase : Tuple = PARAM_MAPPING[full_name.split('.' )[-1]] __lowerCamelCase : Optional[Any] = 'param' if weight_type is not None and weight_type != "param": __lowerCamelCase : List[str] = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowerCamelCase : str = '.'.join([key, hf_param_name] ) else: __lowerCamelCase : Dict = key __lowerCamelCase : str = value if 'lm_head' in full_key else value[0] A__ : int = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=None ) -> List[Any]: __lowerCamelCase : Any = False for key, mapped_key in MAPPING.items(): __lowerCamelCase : str = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowerCamelCase : Dict = True if "*" in mapped_key: __lowerCamelCase : Union[str, Any] = name.split(UpperCAmelCase_ )[0].split('.' )[-2] __lowerCamelCase : Any = mapped_key.replace('*' , UpperCAmelCase_ ) if "weight_g" in name: __lowerCamelCase : int = 'weight_g' elif "weight_v" in name: __lowerCamelCase : Union[str, Any] = 'weight_v' elif "bias" in name: __lowerCamelCase : Any = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase : str = 'weight' else: __lowerCamelCase : List[str] = None if hf_dict is not None: rename_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return is_used return is_used def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str ) -> Any: __lowerCamelCase : Optional[int] = [] __lowerCamelCase : Union[str, Any] = fairseq_model.state_dict() __lowerCamelCase : int = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase : List[Any] = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase : Union[str, Any] = True else: __lowerCamelCase : Optional[int] = load_wavaveca_layer(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(F'Unused weights: {unused_weights}' ) def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ) -> Union[str, Any]: __lowerCamelCase : Tuple = full_name.split('conv_layers.' )[-1] __lowerCamelCase : str = name.split('.' ) __lowerCamelCase : Optional[int] = int(items[0] ) __lowerCamelCase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowerCamelCase : List[str] = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowerCamelCase : Dict = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) __lowerCamelCase : List[str] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) __lowerCamelCase : Union[str, Any] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(UpperCAmelCase_ ) @torch.no_grad() def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=False ) -> Optional[Any]: if config_path is not None: __lowerCamelCase : str = WavaVecaConfig.from_pretrained(UpperCAmelCase_ ) else: __lowerCamelCase : Any = WavaVecaConfig() if is_seq_class: __lowerCamelCase : List[str] = read_txt_into_dict(UpperCAmelCase_ ) __lowerCamelCase : List[Any] = idalabel __lowerCamelCase : str = WavaVecaForSequenceClassification(UpperCAmelCase_ ) __lowerCamelCase : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ) feature_extractor.save_pretrained(UpperCAmelCase_ ) elif is_finetuned: if dict_path: __lowerCamelCase : str = Dictionary.load(UpperCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCamelCase : Any = target_dict.pad_index __lowerCamelCase : int = target_dict.bos_index __lowerCamelCase : int = target_dict.eos_index __lowerCamelCase : Optional[Any] = len(target_dict.symbols ) __lowerCamelCase : Tuple = os.path.join(UpperCAmelCase_ , 'vocab.json' ) if not os.path.isdir(UpperCAmelCase_ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase_ ) ) return os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) __lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched __lowerCamelCase : Union[str, Any] = 0 __lowerCamelCase : Optional[Any] = 1 with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = WavaVecaCTCTokenizer( UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase_ , ) __lowerCamelCase : List[str] = True if config.feat_extract_norm == 'layer' else False __lowerCamelCase : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ) __lowerCamelCase : int = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ ) processor.save_pretrained(UpperCAmelCase_ ) __lowerCamelCase : Any = WavaVecaForCTC(UpperCAmelCase_ ) else: __lowerCamelCase : List[str] = WavaVecaForPreTraining(UpperCAmelCase_ ) if is_finetuned or is_seq_class: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: __lowerCamelCase : Union[str, Any] = argparse.Namespace(task='audio_pretraining' ) __lowerCamelCase : Optional[int] = fairseq.tasks.setup_task(UpperCAmelCase_ ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase_ ) __lowerCamelCase : List[str] = model[0].eval() recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) A__ : int = parser.parse_args() A__ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
13
'''simple docstring''' from __future__ import annotations import os from collections.abc import Mapping A__ : Optional[Any] = tuple[int, int] class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: __lowerCamelCase : set[int] = vertices __lowerCamelCase : dict[EdgeT, int] = { (min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )): weight for edge, weight in edges.items() } def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __lowerCamelCase : Union[str, Any] = weight def lowercase_ ( self ) -> Graph: __lowerCamelCase : Graph = Graph({min(self.vertices )} , {} ) __lowerCamelCase : EdgeT __lowerCamelCase : int __lowerCamelCase : EdgeT __lowerCamelCase : int while len(subgraph.vertices ) < len(self.vertices ): __lowerCamelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __lowerCamelCase : Optional[int] = edge __lowerCamelCase : List[str] = weight subgraph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return subgraph def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int: __lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) ) __lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : dict[EdgeT, int] = {} __lowerCamelCase : list[str] __lowerCamelCase : int __lowerCamelCase : int with open(UpperCAmelCase_ ) as f: __lowerCamelCase : Any = f.read().strip().split('\n' ) __lowerCamelCase : Any = [line.split(',' ) for line in data] for edgea in range(1 , len(UpperCAmelCase_ ) ): for edgea in range(UpperCAmelCase_ ): if adjaceny_matrix[edgea][edgea] != "-": __lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] ) __lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ ) __lowerCamelCase : Graph = graph.prims_algorithm() __lowerCamelCase : int = sum(graph.edges.values() ) __lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @property def lowercase_ ( self ) -> int: torch.manual_seed(0 ) __lowerCamelCase : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Dict = self.dummy_uncond_unet __lowerCamelCase : Any = ScoreSdeVeScheduler() __lowerCamelCase : Dict = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) sde_ve.to(SCREAMING_SNAKE_CASE_ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = torch.manual_seed(0 ) __lowerCamelCase : List[str] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE_ ).images __lowerCamelCase : List[Any] = torch.manual_seed(0 ) __lowerCamelCase : Any = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )[ 0 ] __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> int: __lowerCamelCase : Tuple = 'google/ncsnpp-church-256' __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) sde_ve.to(SCREAMING_SNAKE_CASE_ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = torch.manual_seed(0 ) __lowerCamelCase : int = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE_ ).images __lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase : Optional[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
13
'''simple docstring''' from collections.abc import Generator from math import sin def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: if len(UpperCAmelCase_ ) != 32: raise ValueError('Input must be of length 32' ) __lowerCamelCase : Dict = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bytes: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : Union[str, Any] = format(UpperCAmelCase_ , '08x' )[-8:] __lowerCamelCase : str = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = B'' for char in message: bit_string += format(UpperCAmelCase_ , '08b' ).encode('utf-8' ) __lowerCamelCase : List[str] = format(len(UpperCAmelCase_ ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCAmelCase_ ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> Generator[list[int], None, None]: if len(UpperCAmelCase_ ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(UpperCAmelCase_ ) , 5_12 ): __lowerCamelCase : Any = bit_string[pos : pos + 5_12] __lowerCamelCase : Optional[int] = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : List[Any] = format(UpperCAmelCase_ , '032b' ) __lowerCamelCase : Optional[int] = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCAmelCase_ , 2 ) def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: return (a + b) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = preprocess(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase : Dict = 0x67_45_23_01 __lowerCamelCase : Union[str, Any] = 0xef_cd_ab_89 __lowerCamelCase : Optional[Any] = 0x98_ba_dc_fe __lowerCamelCase : Union[str, Any] = 0x10_32_54_76 __lowerCamelCase : List[str] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCAmelCase_ ): __lowerCamelCase : Dict = aa __lowerCamelCase : Tuple = ba __lowerCamelCase : List[Any] = ca __lowerCamelCase : Dict = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase : List[str] = d ^ (b & (c ^ d)) __lowerCamelCase : Optional[int] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase : Optional[int] = c ^ (d & (b ^ c)) __lowerCamelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase : str = b ^ c ^ d __lowerCamelCase : Any = (3 * i + 5) % 16 else: __lowerCamelCase : Union[str, Any] = c ^ (b | not_aa(UpperCAmelCase_ )) __lowerCamelCase : int = (7 * i) % 16 __lowerCamelCase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase : Optional[Any] = d __lowerCamelCase : Tuple = c __lowerCamelCase : Optional[int] = b __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , left_rotate_aa(UpperCAmelCase_ , shift_amounts[i] ) ) # Add hashed chunk to running total __lowerCamelCase : int = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Dict = reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys A__ : List[str] = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
13
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Dict = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = 'rwkv' lowerCamelCase : Any = {'max_position_embeddings': 'context_length'} def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : Tuple = context_length __lowerCamelCase : str = hidden_size __lowerCamelCase : List[str] = num_hidden_layers __lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCamelCase : Optional[Any] = layer_norm_epsilon __lowerCamelCase : int = rescale_every __lowerCamelCase : Tuple = use_cache __lowerCamelCase : int = bos_token_id __lowerCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : List[Any] = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys A__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 10_00 ) -> int: __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Dict = XGLMConfig lowerCamelCase : List[str] = {} lowerCamelCase : Union[str, Any] = 'gelu' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Any: __lowerCamelCase : int = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : Optional[Any] = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : str = use_input_mask __lowerCamelCase : Dict = use_labels __lowerCamelCase : Union[str, Any] = vocab_size __lowerCamelCase : List[Any] = d_model __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : Optional[Any] = ffn_dim __lowerCamelCase : List[Any] = activation_function __lowerCamelCase : List[Any] = activation_dropout __lowerCamelCase : List[Any] = attention_dropout __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : int = None __lowerCamelCase : int = 0 __lowerCamelCase : Tuple = 2 __lowerCamelCase : Tuple = 1 def lowercase_ ( self ) -> Any: return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __lowerCamelCase : Optional[int] = None if self.use_input_mask: __lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase : str = self.get_config() __lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase_ ( self ) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> str: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : str = config_and_inputs __lowerCamelCase : Union[str, Any] = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase : Any = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase : List[Any] = False lowerCamelCase : Dict = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : str = TFXGLMModelTester(self ) __lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 ) def lowercase_ ( self ) -> Dict: self.config_tester.run_common_tests() @slow def lowercase_ ( self ) -> Optional[int]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Optional[Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def lowercase_ ( self ) -> Any: super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self , SCREAMING_SNAKE_CASE_=True ) -> List[str]: __lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[int] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on __lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __lowerCamelCase : List[Any] = tokenizer('Today is a nice day and' , return_tensors='tf' ) __lowerCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __lowerCamelCase : Tuple = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , seed=[7, 0] ) __lowerCamelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = 'left' # use different length sentences to test batching __lowerCamelCase : Any = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : Any = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
13
1
'''simple docstring''' import requests from bsa import BeautifulSoup def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : dict ) -> str: __lowerCamelCase : List[Any] = BeautifulSoup(requests.get(UpperCAmelCase_ , params=UpperCAmelCase_ ).content , 'html.parser' ) __lowerCamelCase : Any = soup.find('div' , attrs={'class': 'gs_ri'} ) __lowerCamelCase : Any = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' ) return anchors[2].get_text() if __name__ == "__main__": A__ : int = { """title""": ( """Precisely geometry controlled microsupercapacitors for ultrahigh areal """ """capacitance, volumetric capacitance, and energy density""" ), """journal""": """Chem. Mater.""", """volume""": 30, """pages""": """3979-3990""", """year""": 2018, """hl""": """en""", } print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
13
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] = logging.get_logger(__name__) # TODO Update this A__ : Tuple = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'esm' def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : str = num_hidden_layers __lowerCamelCase : List[str] = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Optional[int] = max_position_embeddings __lowerCamelCase : str = initializer_range __lowerCamelCase : Optional[int] = layer_norm_eps __lowerCamelCase : List[str] = position_embedding_type __lowerCamelCase : int = use_cache __lowerCamelCase : Optional[Any] = emb_layer_norm_before __lowerCamelCase : Optional[Any] = token_dropout __lowerCamelCase : str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) __lowerCamelCase : Dict = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) __lowerCamelCase : List[str] = get_default_vocab_list() else: __lowerCamelCase : Optional[Any] = vocab_list else: __lowerCamelCase : Dict = None __lowerCamelCase : Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' ) def lowercase_ ( self ) -> Any: __lowerCamelCase : Any = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : int = self.esmfold_config.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = None lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : float = 0 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : int = 1_2_8 lowerCamelCase : "TrunkConfig" = None def lowercase_ ( self ) -> Any: if self.trunk is None: __lowerCamelCase : List[str] = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = TrunkConfig(**self.trunk ) def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[int] = asdict(self ) __lowerCamelCase : str = self.trunk.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 4_8 lowerCamelCase : int = 1_0_2_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : float = 0 lowerCamelCase : float = 0 lowerCamelCase : bool = False lowerCamelCase : int = 4 lowerCamelCase : Optional[int] = 1_2_8 lowerCamelCase : "StructureModuleConfig" = None def lowercase_ ( self ) -> Optional[int]: if self.structure_module is None: __lowerCamelCase : Dict = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) __lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width __lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[str] = asdict(self ) __lowerCamelCase : int = self.structure_module.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 3_8_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_6 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_2 lowerCamelCase : int = 4 lowerCamelCase : int = 8 lowerCamelCase : float = 0.1 lowerCamelCase : int = 8 lowerCamelCase : int = 1 lowerCamelCase : int = 2 lowerCamelCase : int = 7 lowerCamelCase : int = 1_0 lowerCamelCase : float = 1e-8 lowerCamelCase : float = 1e5 def lowercase_ ( self ) -> Any: return asdict(self ) def UpperCAmelCase__ ( ) -> Optional[Any]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
13
1
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase_ (tf.keras.optimizers.schedules.LearningRateSchedule ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Any: super().__init__() __lowerCamelCase : Optional[Any] = initial_learning_rate __lowerCamelCase : Optional[Any] = warmup_steps __lowerCamelCase : Union[str, Any] = power __lowerCamelCase : Optional[int] = decay_schedule_fn __lowerCamelCase : Any = name def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCamelCase : str = tf.cast(SCREAMING_SNAKE_CASE_ , tf.floataa ) __lowerCamelCase : Optional[int] = tf.cast(self.warmup_steps , tf.floataa ) __lowerCamelCase : List[Any] = global_step_float / warmup_steps_float __lowerCamelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(SCREAMING_SNAKE_CASE_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> Optional[Any]: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 0.9 , UpperCAmelCase_ : float = 0.999 , UpperCAmelCase_ : float = 1e-8 , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : Optional[List[str]] = None , ) -> int: __lowerCamelCase : int = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCAmelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase_ , ) if num_warmup_steps: __lowerCamelCase : str = WarmUp( initial_learning_rate=UpperCAmelCase_ , decay_schedule_fn=UpperCAmelCase_ , warmup_steps=UpperCAmelCase_ , ) if weight_decay_rate > 0.0: __lowerCamelCase : List[Any] = AdamWeightDecay( learning_rate=UpperCAmelCase_ , weight_decay_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=UpperCAmelCase_ , ) else: __lowerCamelCase : Tuple = tf.keras.optimizers.Adam( learning_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ = 0.0_0_1 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.9_9_9 , SCREAMING_SNAKE_CASE_ = 1E-7 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "AdamWeightDecay" , **SCREAMING_SNAKE_CASE_ , ) -> int: super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = weight_decay_rate __lowerCamelCase : str = include_in_weight_decay __lowerCamelCase : List[Any] = exclude_from_weight_decay @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase : Any = {'WarmUp': WarmUp} return super(SCREAMING_SNAKE_CASE_ , cls ).from_config(SCREAMING_SNAKE_CASE_ , custom_objects=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: super(SCREAMING_SNAKE_CASE_ , self )._prepare_local(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Tuple = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = list(zip(*SCREAMING_SNAKE_CASE_ ) ) return super(SCREAMING_SNAKE_CASE_ , self ).apply_gradients(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , name=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCamelCase : Optional[int] = apply_state or {} __lowerCamelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCamelCase : List[Any] = self._fallback_apply_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase , __lowerCamelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_dense(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_sparse(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Any = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return False return True class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self ) -> Tuple: __lowerCamelCase : Tuple = [] __lowerCamelCase : Optional[Any] = None @property def lowercase_ ( self ) -> List[str]: if self._accum_steps is None: __lowerCamelCase : Tuple = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowercase_ ( self ) -> List[str]: if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: if not self._gradients: __lowerCamelCase : List[str] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(SCREAMING_SNAKE_CASE_ ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(SCREAMING_SNAKE_CASE_ ) != len(self._gradients ): raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(SCREAMING_SNAKE_CASE_ )}' ) for accum_gradient, gradient in zip(self._gradients , SCREAMING_SNAKE_CASE_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(SCREAMING_SNAKE_CASE_ ) self._accum_steps.assign_add(1 ) def lowercase_ ( self ) -> int: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(SCREAMING_SNAKE_CASE_ ) )
13
'''simple docstring''' A__ : dict[tuple[int, int, int], int] = {} def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on __lowerCamelCase : List[Any] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one __lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 __lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter __lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 ) __lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime __lowerCamelCase : Union[str, Any] = prizestrings return prizestrings def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int: return _calculate(UpperCAmelCase_ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
13
1
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : List[str] = {} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> None: __lowerCamelCase : Optional[int] = {} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: if nodea not in self.connections: self.add_node(SCREAMING_SNAKE_CASE_ ) if nodea not in self.connections: self.add_node(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = probability def lowercase_ ( self ) -> list[str]: return list(self.connections ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Optional[Any] = 0 __lowerCamelCase : Optional[Any] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[tuple[str, str, float]] , UpperCAmelCase_ : int ) -> dict[str, int]: __lowerCamelCase : Any = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = Counter(graph.get_nodes() ) __lowerCamelCase : Optional[int] = start for _ in range(UpperCAmelCase_ ): __lowerCamelCase : List[Any] = graph.transition(UpperCAmelCase_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str lowerCamelCase : Optional[str] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None def lowercase_ ( self ) -> List[str]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> Any: return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def lowercase_ ( self ) -> int: return self.major, self.minor, self.patch def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return Version(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return other raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' ) def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: try: __lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) return self.tuple < other.tuple def __hash__( self ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase_ ( self ) -> str: return self.version_str def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str: __lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict: return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
13
1
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00_00_00 , UpperCAmelCase_ : int = 10 ) -> int: __lowerCamelCase : defaultdict = defaultdict(UpperCAmelCase_ ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __lowerCamelCase : Optional[int] = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __lowerCamelCase : List[str] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(UpperCAmelCase_ , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f'''{solution() = }''')
13
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : Any = [] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: return self.node_position[vertex] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = pos def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase : str = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase : Optional[Any] = 2 * start + 1 else: __lowerCamelCase : int = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase : int = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase : str = temp, tempa __lowerCamelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ ) self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = position[index] while index != 0: __lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase : Union[str, Any] = heap[parent] __lowerCamelCase : Any = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Tuple = val __lowerCamelCase : List[str] = temp self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) break __lowerCamelCase : Tuple = parent else: __lowerCamelCase : Union[str, Any] = val __lowerCamelCase : Tuple = temp self.set_position(SCREAMING_SNAKE_CASE_ , 0 ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Any = positions[0] __lowerCamelCase : Union[str, Any] = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return temp def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str: __lowerCamelCase : List[Any] = Heap() __lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ ) __lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase : Tuple = [] for vertex in range(len(UpperCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase_ ) heap.node_position.append(UpperCAmelCase_ ) __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 1 __lowerCamelCase : str = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase : Any = 0 __lowerCamelCase : Any = distance heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(1 , len(UpperCAmelCase_ ) ): __lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase : Union[str, Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase_ )] ): __lowerCamelCase : Dict = distance heap.bottom_to_top( UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A__ : Tuple = int(input("""Enter number of edges: """).strip()) A__ : str = defaultdict(list) for _ in range(edges_number): A__ : Optional[int] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int lowerCamelCase : float = 0.0 lowerCamelCase : int = 1 lowerCamelCase : int = 1 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Any: __lowerCamelCase : Any = [] __lowerCamelCase : List[str] = [] for i in range(self.num_layers ): __lowerCamelCase : Optional[Any] = self.in_channels if i == 0 else self.out_channels __lowerCamelCase : Optional[int] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = resnets __lowerCamelCase : str = attentions if self.add_downsample: __lowerCamelCase : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Dict: __lowerCamelCase : Dict = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase : List[Any] = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = attn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE_ ) output_states += (hidden_states,) return hidden_states, output_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int lowerCamelCase : float = 0.0 lowerCamelCase : int = 1 lowerCamelCase : bool = True lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Tuple = [] for i in range(self.num_layers ): __lowerCamelCase : Optional[int] = self.in_channels if i == 0 else self.out_channels __lowerCamelCase : Union[str, Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = resnets if self.add_downsample: __lowerCamelCase : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Union[str, Any]: __lowerCamelCase : int = () for resnet in self.resnets: __lowerCamelCase : Dict = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase : int = self.downsamplers_a(SCREAMING_SNAKE_CASE_ ) output_states += (hidden_states,) return hidden_states, output_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int lowerCamelCase : int lowerCamelCase : float = 0.0 lowerCamelCase : int = 1 lowerCamelCase : int = 1 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Dict: __lowerCamelCase : Optional[Any] = [] __lowerCamelCase : Dict = [] for i in range(self.num_layers ): __lowerCamelCase : Union[str, Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = resnets __lowerCamelCase : Optional[Any] = attentions if self.add_upsample: __lowerCamelCase : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple: for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase : int = res_hidden_states_tuple[-1] __lowerCamelCase : str = res_hidden_states_tuple[:-1] __lowerCamelCase : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase : List[str] = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = attn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) if self.add_upsample: __lowerCamelCase : Tuple = self.upsamplers_a(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int lowerCamelCase : int lowerCamelCase : float = 0.0 lowerCamelCase : int = 1 lowerCamelCase : bool = True lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> str: __lowerCamelCase : Tuple = [] for i in range(self.num_layers ): __lowerCamelCase : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase : str = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase : Optional[int] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = resnets if self.add_upsample: __lowerCamelCase : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> List[Any]: for resnet in self.resnets: # pop res hidden states __lowerCamelCase : List[Any] = res_hidden_states_tuple[-1] __lowerCamelCase : Any = res_hidden_states_tuple[:-1] __lowerCamelCase : Any = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase : Optional[int] = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) if self.add_upsample: __lowerCamelCase : int = self.upsamplers_a(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : float = 0.0 lowerCamelCase : int = 1 lowerCamelCase : int = 1 lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Optional[Any]: # there is always at least one resnet __lowerCamelCase : Dict = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase : List[str] = [] for _ in range(self.num_layers ): __lowerCamelCase : List[str] = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = resnets __lowerCamelCase : Optional[int] = attentions def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Dict: __lowerCamelCase : List[str] = self.resnets[0](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase : Optional[int] = attn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ ) return hidden_states
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int: __lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6 __lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser A__ : int = logging.getLogger(__name__) torch.set_grad_enabled(False) A__ : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]=1_00 , UpperCAmelCase_ : Any=" " ) -> List[str]: __lowerCamelCase : Any = text.split(UpperCAmelCase_ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ )] def UpperCAmelCase__ ( UpperCAmelCase_ : dict ) -> dict: __lowerCamelCase , __lowerCamelCase : List[Any] = [], [] for title, text in zip(documents['title'] , documents['text'] ): if text is not None: for passage in split_text(UpperCAmelCase_ ): titles.append(title if title is not None else '' ) texts.append(UpperCAmelCase_ ) return {"title": titles, "text": texts} def UpperCAmelCase__ ( UpperCAmelCase_ : dict , UpperCAmelCase_ : DPRContextEncoder , UpperCAmelCase_ : DPRContextEncoderTokenizerFast ) -> dict: __lowerCamelCase : Union[str, Any] = ctx_tokenizer( documents['title'] , documents['text'] , truncation=UpperCAmelCase_ , padding='longest' , return_tensors='pt' )['input_ids'] __lowerCamelCase : Tuple = ctx_encoder(input_ids.to(device=UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def UpperCAmelCase__ ( UpperCAmelCase_ : "RagExampleArguments" , UpperCAmelCase_ : "ProcessingArguments" , UpperCAmelCase_ : "IndexHnswArguments" , ) -> Dict: ###################################### logger.info('Step 1 - Create the dataset' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __lowerCamelCase : int = load_dataset( 'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __lowerCamelCase : str = dataset.map(UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=processing_args.num_proc ) # And compute the embeddings __lowerCamelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCAmelCase_ ) __lowerCamelCase : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __lowerCamelCase : Any = Features( {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space __lowerCamelCase : List[str] = dataset.map( partial(UpperCAmelCase_ , ctx_encoder=UpperCAmelCase_ , ctx_tokenizer=UpperCAmelCase_ ) , batched=UpperCAmelCase_ , batch_size=processing_args.batch_size , features=UpperCAmelCase_ , ) # And finally save your dataset __lowerCamelCase : Dict = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' ) dataset.save_to_disk(UpperCAmelCase_ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('Step 2 - Index the dataset' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __lowerCamelCase : int = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('embeddings' , custom_index=UpperCAmelCase_ ) # And save the index __lowerCamelCase : Union[str, Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' ) dataset.get_index('embeddings' ).save(UpperCAmelCase_ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = field( default=str(Path(_UpperCAmelCase ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) lowerCamelCase : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) lowerCamelCase : str = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) lowerCamelCase : str = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) lowerCamelCase : Optional[str] = field( default=str(Path(_UpperCAmelCase ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[int] = field( default=_UpperCAmelCase , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) lowerCamelCase : int = field( default=1_6 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = field( default=7_6_8 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) lowerCamelCase : int = field( default=1_2_8 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) A__ : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) A__ , A__ , A__ : Tuple = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: A__ : List[str] = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
13
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : int = image_size __lowerCamelCase : List[str] = patch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : Any = is_training __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : str = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 1 def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Optional[int] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (self.image_size, self.image_size) __lowerCamelCase : str = (self.patch_size, self.patch_size) __lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = self.type_sequence_label_size __lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowercase_ ( self ) -> None: __lowerCamelCase : str = FlaxViTModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[str] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A__ : Optional[int] = { """configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""], """tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ """TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """AdaptiveEmbedding""", """TransfoXLForSequenceClassification""", """TransfoXLLMHeadModel""", """TransfoXLModel""", """TransfoXLPreTrainedModel""", """load_tf_weights_in_transfo_xl""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : List[Any] = [ """TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAdaptiveEmbedding""", """TFTransfoXLForSequenceClassification""", """TFTransfoXLLMHeadModel""", """TFTransfoXLMainLayer""", """TFTransfoXLModel""", """TFTransfoXLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys A__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' import argparse A__ : Optional[Any] = """docs/source/_static/js/custom.js""" def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int: with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f: __lowerCamelCase : Dict = f.readlines() __lowerCamelCase : Tuple = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 __lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += F' "v{version}": "v{version}",\n' with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase_ ) if __name__ == "__main__": A__ : str = argparse.ArgumentParser() parser.add_argument("""--version""", help="""Release version.""") A__ : Any = parser.parse_args() update_custom_js(args.version)
13
1
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bool: return str(UpperCAmelCase_ ) == str(UpperCAmelCase_ )[::-1] def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: return int(UpperCAmelCase_ ) + int(str(UpperCAmelCase_ )[::-1] ) def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00_00 ) -> int: __lowerCamelCase : Tuple = [] for num in range(1 , UpperCAmelCase_ ): __lowerCamelCase : int = 0 __lowerCamelCase : Optional[int] = num while iterations < 50: __lowerCamelCase : Union[str, Any] = sum_reverse(UpperCAmelCase_ ) iterations += 1 if is_palindrome(UpperCAmelCase_ ): break else: lychrel_nums.append(UpperCAmelCase_ ) return len(UpperCAmelCase_ ) if __name__ == "__main__": print(f'''{solution() = }''')
13
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape __lowerCamelCase : Dict = jax.image.resize( SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) __lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> List[str]: __lowerCamelCase : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int = None lowerCamelCase : float = 0.0 lowerCamelCase : bool = None lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels __lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : Tuple = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype ) __lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : int = nn.Dropout(self.dropout_prob ) __lowerCamelCase : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __lowerCamelCase : List[Any] = None if use_nin_shortcut: __lowerCamelCase : Any = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple: __lowerCamelCase : List[Any] = hidden_states __lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 ) __lowerCamelCase : Optional[int] = hidden_states + temb __lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ ) if self.conv_shortcut is not None: __lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ ) return hidden_states + residual
13
1
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer A__ : Optional[int] = """bart""" A__ : Optional[int] = True @st.cache(allow_output_mutation=UpperCAmelCase_ ) def UpperCAmelCase__ ( ) -> Optional[Any]: if LOAD_DENSE_INDEX: __lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' ) __lowerCamelCase : Optional[int] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' ) __lowerCamelCase : int = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase : Union[str, Any] = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' ) __lowerCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' ) __lowerCamelCase : Dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' ) sas_model.load_state_dict(save_dict['model'] ) __lowerCamelCase : Optional[Any] = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase : List[Any] = make_qa_sas_model( model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=UpperCAmelCase_ ) def UpperCAmelCase__ ( ) -> int: if LOAD_DENSE_INDEX: __lowerCamelCase : List[str] = faiss.StandardGpuResources() __lowerCamelCase : Any = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train'] __lowerCamelCase : List[Any] = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , ) __lowerCamelCase : Any = faiss.IndexFlatIP(1_28 ) __lowerCamelCase : Dict = faiss.index_cpu_to_gpu(UpperCAmelCase_ , 1 , UpperCAmelCase_ ) wikiaab_gpu_index_flat.add(UpperCAmelCase_ ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase : Dict = (None, None) __lowerCamelCase : Union[str, Any] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=UpperCAmelCase_ ) def UpperCAmelCase__ ( ) -> Dict: __lowerCamelCase : Union[str, Any] = datasets.load_dataset('eli5' , name='LFQA_reddit' ) __lowerCamelCase : Union[str, Any] = elia['train_eli5'] __lowerCamelCase : List[Any] = np.memmap( 'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) ) __lowerCamelCase : int = faiss.IndexFlatIP(1_28 ) eli5_train_q_index.add(UpperCAmelCase_ ) return (elia_train, eli5_train_q_index) A__ , A__ , A__ : Dict = load_indexes() A__ , A__ , A__ , A__ : str = load_models() A__ , A__ : Dict = load_train_data() def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple=10 ) -> List[str]: __lowerCamelCase : List[str] = embed_questions_for_retrieval([question] , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase , __lowerCamelCase : int = eli5_train_q_index.search(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = [elia_train[int(UpperCAmelCase_ )] for i in I[0]] return nn_examples def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple="wiki40b" , UpperCAmelCase_ : Dict="dense" , UpperCAmelCase_ : int=10 ) -> Any: if source == "none": __lowerCamelCase , __lowerCamelCase : Optional[Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase : Optional[int] = query_qa_dense_index( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: __lowerCamelCase , __lowerCamelCase : Optional[Any] = query_es_index( UpperCAmelCase_ , UpperCAmelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=UpperCAmelCase_ , ) __lowerCamelCase : Tuple = [ (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst ] __lowerCamelCase : Any = 'question: {} context: {}'.format(UpperCAmelCase_ , UpperCAmelCase_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda UpperCAmelCase_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCAmelCase_ : None), } ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]=64 , UpperCAmelCase_ : Tuple=2_56 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=0.95 , UpperCAmelCase_ : Union[str, Any]=0.8 ) -> Optional[int]: with torch.no_grad(): __lowerCamelCase : Any = qa_sas_generate( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , num_answers=1 , num_beams=UpperCAmelCase_ , min_len=UpperCAmelCase_ , max_len=UpperCAmelCase_ , do_sample=UpperCAmelCase_ , temp=UpperCAmelCase_ , top_p=UpperCAmelCase_ , top_k=UpperCAmelCase_ , max_input_length=10_24 , device='cuda:0' , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar A__ : Optional[Any] = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" A__ : List[Any] = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia A__ : Any = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) A__ : Optional[Any] = [ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] A__ : List[str] = st.sidebar.checkbox("""Demo options""") if demo_options: A__ : List[Any] = st.sidebar.selectbox( """""", action_list, index=3, ) A__ : str = action_list.index(action_st) A__ : str = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) A__ : List[str] = show_type == """Show full text of passages""" else: A__ : Optional[Any] = 3 A__ : Optional[int] = True A__ : Optional[int] = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: A__ : Optional[int] = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) A__ : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) A__ : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: A__ : Union[str, Any] = """wiki40b""" A__ : Union[str, Any] = """dense""" A__ : Dict = """beam""" A__ : Dict = 2 A__ : List[Any] = 64 A__ : Dict = 256 A__ : Dict = None A__ : Optional[int] = None A__ : List[str] = st.sidebar.checkbox("""Generation options""") if generate_options: A__ : Any = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. """ st.sidebar.markdown(generate_info) A__ : Dict = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) A__ : int = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) A__ : Optional[int] = st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": A__ : List[Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: A__ : Any = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) A__ : int = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) A__ : int = None # start main text A__ : int = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] A__ : int = st.selectbox( """What would you like to ask? ---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": A__ : List[Any] = st.text_input("""Enter your question here:""", """""") else: A__ : Dict = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": A__ , A__ : Optional[int] = make_support(question, source=wiki_source, method="""dense""", n_results=10) A__ , A__ : Tuple = make_support(question, source=wiki_source, method="""sparse""", n_results=10) A__ : str = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] A__ : Union[str, Any] = support_list[:10] A__ : List[str] = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: A__ , A__ : int = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: A__ , A__ : Tuple = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): A__ : List[Any] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) A__ : int = res[1].strip() if sec_titles == "": A__ : int = """[{}]({})""".format(res[0], wiki_url) else: A__ : Union[str, Any] = sec_titles.split(""" & """) A__ : Optional[int] = """ & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: A__ : Optional[Any] = find_nearest_training(question) A__ : str = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) A__ : Optional[Any] = [ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) A__ : Optional[Any] = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
13
'''simple docstring''' from __future__ import annotations A__ : int = 10 def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> list[int]: __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Any = max(UpperCAmelCase_ ) while placement <= max_digit: # declare and initialize empty buckets __lowerCamelCase : list[list] = [[] for _ in range(UpperCAmelCase_ )] # split list_of_ints between the buckets for i in list_of_ints: __lowerCamelCase : List[Any] = int((i / placement) % RADIX ) buckets[tmp].append(UpperCAmelCase_ ) # put each buckets' contents into list_of_ints __lowerCamelCase : Tuple = 0 for b in range(UpperCAmelCase_ ): for i in buckets[b]: __lowerCamelCase : List[Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : Optional[int] = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[Any] = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys A__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' from collections import defaultdict from math import gcd def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_50_00_00 ) -> int: __lowerCamelCase : defaultdict = defaultdict(UpperCAmelCase_ ) __lowerCamelCase : Any = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , UpperCAmelCase_ , 2 ): if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) > 1: continue __lowerCamelCase : Any = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(UpperCAmelCase_ , limit + 1 , UpperCAmelCase_ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from math import loga def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError('Input value must be a \'int\' type' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer A__ : str = logging.get_logger(__name__) A__ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} A__ : Tuple = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } A__ : str = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } A__ : Tuple = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION lowerCamelCase : Dict = RoFormerTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case or pre_tok_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents ): __lowerCamelCase : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) ) __lowerCamelCase : Union[str, Any] = do_lower_case __lowerCamelCase : str = strip_accents __lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = do_lower_case def __getstate__( self ) -> List[str]: __lowerCamelCase : Union[str, Any] = self.__dict__.copy() __lowerCamelCase : Dict = BertPreTokenizer() return state def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Optional[int] = d __lowerCamelCase : List[Any] = self.__dict__['_tokenizer'].get_vocab() __lowerCamelCase : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE_ ) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: __lowerCamelCase : List[str] = [self.sep_token_id] __lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: __lowerCamelCase : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Any: __lowerCamelCase : Tuple = BertPreTokenizer() return super().save_pretrained(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A__ : int = logging.get_logger(__name__) A__ : Optional[Any] = { """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""", } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'deta' lowerCamelCase : Tuple = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=9_00 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=3_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.2_5 , **SCREAMING_SNAKE_CASE_ , ) -> Any: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) __lowerCamelCase : Any = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] ) else: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Dict = backbone_config.pop('model_type' ) __lowerCamelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = backbone_config __lowerCamelCase : Dict = num_queries __lowerCamelCase : Dict = max_position_embeddings __lowerCamelCase : Tuple = d_model __lowerCamelCase : Optional[int] = encoder_ffn_dim __lowerCamelCase : Dict = encoder_layers __lowerCamelCase : Optional[int] = encoder_attention_heads __lowerCamelCase : str = decoder_ffn_dim __lowerCamelCase : int = decoder_layers __lowerCamelCase : int = decoder_attention_heads __lowerCamelCase : List[str] = dropout __lowerCamelCase : List[Any] = attention_dropout __lowerCamelCase : Union[str, Any] = activation_dropout __lowerCamelCase : str = activation_function __lowerCamelCase : Optional[Any] = init_std __lowerCamelCase : Optional[Any] = init_xavier_std __lowerCamelCase : Optional[int] = encoder_layerdrop __lowerCamelCase : str = auxiliary_loss __lowerCamelCase : Union[str, Any] = position_embedding_type # deformable attributes __lowerCamelCase : int = num_feature_levels __lowerCamelCase : List[str] = encoder_n_points __lowerCamelCase : Optional[Any] = decoder_n_points __lowerCamelCase : Tuple = two_stage __lowerCamelCase : List[str] = two_stage_num_proposals __lowerCamelCase : Optional[Any] = with_box_refine __lowerCamelCase : str = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' ) # Hungarian matcher __lowerCamelCase : List[Any] = class_cost __lowerCamelCase : str = bbox_cost __lowerCamelCase : Optional[Any] = giou_cost # Loss coefficients __lowerCamelCase : Optional[int] = mask_loss_coefficient __lowerCamelCase : Any = dice_loss_coefficient __lowerCamelCase : Optional[int] = bbox_loss_coefficient __lowerCamelCase : str = giou_loss_coefficient __lowerCamelCase : Tuple = eos_coefficient __lowerCamelCase : str = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def lowercase_ ( self ) -> int: return self.encoder_attention_heads @property def lowercase_ ( self ) -> int: return self.d_model def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[str] = copy.deepcopy(self.__dict__ ) __lowerCamelCase : Optional[Any] = self.backbone_config.to_dict() __lowerCamelCase : Optional[int] = self.__class__.model_type return output
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer A__ : int = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast A__ : Dict = TaTokenizerFast A__ : Dict = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys A__ : Union[str, Any] = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
13
1
'''simple docstring''' from graphs.minimum_spanning_tree_kruskal import kruskal def UpperCAmelCase__ ( ) -> str: __lowerCamelCase : Optional[Any] = 9 __lowerCamelCase : str = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] __lowerCamelCase : Union[str, Any] = kruskal(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(UpperCAmelCase_ ) == sorted(UpperCAmelCase_ )
13
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase_ (tf.keras.optimizers.schedules.LearningRateSchedule ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Any: super().__init__() __lowerCamelCase : Optional[Any] = initial_learning_rate __lowerCamelCase : Optional[Any] = warmup_steps __lowerCamelCase : Union[str, Any] = power __lowerCamelCase : Optional[int] = decay_schedule_fn __lowerCamelCase : Any = name def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCamelCase : str = tf.cast(SCREAMING_SNAKE_CASE_ , tf.floataa ) __lowerCamelCase : Optional[int] = tf.cast(self.warmup_steps , tf.floataa ) __lowerCamelCase : List[Any] = global_step_float / warmup_steps_float __lowerCamelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(SCREAMING_SNAKE_CASE_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> Optional[Any]: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 0.9 , UpperCAmelCase_ : float = 0.999 , UpperCAmelCase_ : float = 1e-8 , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : Optional[List[str]] = None , ) -> int: __lowerCamelCase : int = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCAmelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase_ , ) if num_warmup_steps: __lowerCamelCase : str = WarmUp( initial_learning_rate=UpperCAmelCase_ , decay_schedule_fn=UpperCAmelCase_ , warmup_steps=UpperCAmelCase_ , ) if weight_decay_rate > 0.0: __lowerCamelCase : List[Any] = AdamWeightDecay( learning_rate=UpperCAmelCase_ , weight_decay_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=UpperCAmelCase_ , ) else: __lowerCamelCase : Tuple = tf.keras.optimizers.Adam( learning_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ = 0.0_0_1 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.9_9_9 , SCREAMING_SNAKE_CASE_ = 1E-7 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "AdamWeightDecay" , **SCREAMING_SNAKE_CASE_ , ) -> int: super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = weight_decay_rate __lowerCamelCase : str = include_in_weight_decay __lowerCamelCase : List[Any] = exclude_from_weight_decay @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase : Any = {'WarmUp': WarmUp} return super(SCREAMING_SNAKE_CASE_ , cls ).from_config(SCREAMING_SNAKE_CASE_ , custom_objects=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: super(SCREAMING_SNAKE_CASE_ , self )._prepare_local(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Tuple = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = list(zip(*SCREAMING_SNAKE_CASE_ ) ) return super(SCREAMING_SNAKE_CASE_ , self ).apply_gradients(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , name=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCamelCase : Optional[int] = apply_state or {} __lowerCamelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCamelCase : List[Any] = self._fallback_apply_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase , __lowerCamelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_dense(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_sparse(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Any = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return False return True class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self ) -> Tuple: __lowerCamelCase : Tuple = [] __lowerCamelCase : Optional[Any] = None @property def lowercase_ ( self ) -> List[str]: if self._accum_steps is None: __lowerCamelCase : Tuple = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowercase_ ( self ) -> List[str]: if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: if not self._gradients: __lowerCamelCase : List[str] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(SCREAMING_SNAKE_CASE_ ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(SCREAMING_SNAKE_CASE_ ) != len(self._gradients ): raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(SCREAMING_SNAKE_CASE_ )}' ) for accum_gradient, gradient in zip(self._gradients , SCREAMING_SNAKE_CASE_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(SCREAMING_SNAKE_CASE_ ) self._accum_steps.assign_add(1 ) def lowercase_ ( self ) -> int: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(SCREAMING_SNAKE_CASE_ ) )
13
1
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated A__ : List[Any] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ A__ : Union[str, Any] = """https://storage.googleapis.com/cvdf-datasets/mnist/""" def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> Any: __lowerCamelCase : Union[str, Any] = numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase_ )[0] @deprecated(UpperCAmelCase_ , 'Please use tf.data to implement this functionality.' ) def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> List[Any]: print('Extracting' , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase_ ) as bytestream: __lowerCamelCase : str = _readaa(UpperCAmelCase_ ) if magic != 20_51: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) __lowerCamelCase : Dict = _readaa(UpperCAmelCase_ ) __lowerCamelCase : List[Any] = _readaa(UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = _readaa(UpperCAmelCase_ ) __lowerCamelCase : Tuple = bytestream.read(rows * cols * num_images ) __lowerCamelCase : Union[str, Any] = numpy.frombuffer(UpperCAmelCase_ , dtype=numpy.uinta ) __lowerCamelCase : Any = data.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , 1 ) return data @deprecated(UpperCAmelCase_ , 'Please use tf.one_hot on tensors.' ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ) -> Optional[Any]: __lowerCamelCase : Any = labels_dense.shape[0] __lowerCamelCase : Optional[int] = numpy.arange(UpperCAmelCase_ ) * num_classes __lowerCamelCase : List[str] = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase : Optional[Any] = 1 return labels_one_hot @deprecated(UpperCAmelCase_ , 'Please use tf.data to implement this functionality.' ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : List[str]=10 ) -> str: print('Extracting' , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase_ ) as bytestream: __lowerCamelCase : List[str] = _readaa(UpperCAmelCase_ ) if magic != 20_49: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) __lowerCamelCase : Any = _readaa(UpperCAmelCase_ ) __lowerCamelCase : Any = bytestream.read(UpperCAmelCase_ ) __lowerCamelCase : Tuple = numpy.frombuffer(UpperCAmelCase_ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(UpperCAmelCase_ , UpperCAmelCase_ ) return labels class UpperCAmelCase_ : """simple docstring""" @deprecated( SCREAMING_SNAKE_CASE_ , 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' , ) def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=dtypes.floataa , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> str: __lowerCamelCase , __lowerCamelCase : Any = random_seed.get_seed(SCREAMING_SNAKE_CASE_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase : Optional[Any] = dtypes.as_dtype(SCREAMING_SNAKE_CASE_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: __lowerCamelCase : Optional[int] = 1_00_00 __lowerCamelCase : Any = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'images.shape: {images.shape} labels.shape: {labels.shape}' __lowerCamelCase : Optional[int] = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase : Optional[int] = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase : Union[str, Any] = images.astype(numpy.floataa ) __lowerCamelCase : Union[str, Any] = numpy.multiply(SCREAMING_SNAKE_CASE_ , 1.0 / 2_5_5.0 ) __lowerCamelCase : int = images __lowerCamelCase : int = labels __lowerCamelCase : Tuple = 0 __lowerCamelCase : Optional[int] = 0 @property def lowercase_ ( self ) -> Dict: return self._images @property def lowercase_ ( self ) -> Any: return self._labels @property def lowercase_ ( self ) -> List[Any]: return self._num_examples @property def lowercase_ ( self ) -> Union[str, Any]: return self._epochs_completed def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ) -> int: if fake_data: __lowerCamelCase : int = [1] * 7_84 __lowerCamelCase : int = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE_ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE_ )], ) __lowerCamelCase : Tuple = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase : int = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.images[perma] __lowerCamelCase : Dict = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase : Any = self._num_examples - start __lowerCamelCase : Optional[Any] = self._images[start : self._num_examples] __lowerCamelCase : Tuple = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase : Optional[int] = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.images[perm] __lowerCamelCase : str = self.labels[perm] # Start next epoch __lowerCamelCase : Any = 0 __lowerCamelCase : Optional[int] = batch_size - rest_num_examples __lowerCamelCase : Optional[int] = self._index_in_epoch __lowerCamelCase : str = self._images[start:end] __lowerCamelCase : List[str] = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase : Optional[int] = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(UpperCAmelCase_ , 'Please write your own downloading logic.' ) def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ) -> Tuple: if not gfile.Exists(UpperCAmelCase_ ): gfile.MakeDirs(UpperCAmelCase_ ) __lowerCamelCase : Tuple = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if not gfile.Exists(UpperCAmelCase_ ): urllib.request.urlretrieve(UpperCAmelCase_ , UpperCAmelCase_ ) # noqa: S310 with gfile.GFile(UpperCAmelCase_ ) as f: __lowerCamelCase : str = f.size() print('Successfully downloaded' , UpperCAmelCase_ , UpperCAmelCase_ , 'bytes.' ) return filepath @deprecated( UpperCAmelCase_ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Optional[Any]=dtypes.floataa , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=50_00 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : List[Any]=DEFAULT_SOURCE_URL , ) -> List[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=UpperCAmelCase_ , one_hot=UpperCAmelCase_ , dtype=UpperCAmelCase_ , seed=UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = fake() __lowerCamelCase : Optional[Any] = fake() __lowerCamelCase : Dict = fake() return _Datasets(train=UpperCAmelCase_ , validation=UpperCAmelCase_ , test=UpperCAmelCase_ ) if not source_url: # empty string check __lowerCamelCase : Tuple = DEFAULT_SOURCE_URL __lowerCamelCase : Dict = 'train-images-idx3-ubyte.gz' __lowerCamelCase : int = 'train-labels-idx1-ubyte.gz' __lowerCamelCase : Union[str, Any] = 't10k-images-idx3-ubyte.gz' __lowerCamelCase : Tuple = 't10k-labels-idx1-ubyte.gz' __lowerCamelCase : Dict = _maybe_download( UpperCAmelCase_ , UpperCAmelCase_ , source_url + train_images_file ) with gfile.Open(UpperCAmelCase_ , 'rb' ) as f: __lowerCamelCase : Union[str, Any] = _extract_images(UpperCAmelCase_ ) __lowerCamelCase : List[str] = _maybe_download( UpperCAmelCase_ , UpperCAmelCase_ , source_url + train_labels_file ) with gfile.Open(UpperCAmelCase_ , 'rb' ) as f: __lowerCamelCase : List[Any] = _extract_labels(UpperCAmelCase_ , one_hot=UpperCAmelCase_ ) __lowerCamelCase : str = _maybe_download( UpperCAmelCase_ , UpperCAmelCase_ , source_url + test_images_file ) with gfile.Open(UpperCAmelCase_ , 'rb' ) as f: __lowerCamelCase : Dict = _extract_images(UpperCAmelCase_ ) __lowerCamelCase : List[Any] = _maybe_download( UpperCAmelCase_ , UpperCAmelCase_ , source_url + test_labels_file ) with gfile.Open(UpperCAmelCase_ , 'rb' ) as f: __lowerCamelCase : List[str] = _extract_labels(UpperCAmelCase_ , one_hot=UpperCAmelCase_ ) if not 0 <= validation_size <= len(UpperCAmelCase_ ): __lowerCamelCase : int = ( 'Validation size should be between 0 and ' F'{len(UpperCAmelCase_ )}. Received: {validation_size}.' ) raise ValueError(UpperCAmelCase_ ) __lowerCamelCase : Dict = train_images[:validation_size] __lowerCamelCase : str = train_labels[:validation_size] __lowerCamelCase : List[str] = train_images[validation_size:] __lowerCamelCase : int = train_labels[validation_size:] __lowerCamelCase : Any = {'dtype': dtype, 'reshape': reshape, 'seed': seed} __lowerCamelCase : Union[str, Any] = _DataSet(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = _DataSet(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) __lowerCamelCase : Any = _DataSet(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) return _Datasets(train=UpperCAmelCase_ , validation=UpperCAmelCase_ , test=UpperCAmelCase_ )
13
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE_=[1, 2, 3] , ) -> Any: __lowerCamelCase : Optional[Any] = parent __lowerCamelCase : int = batch_size __lowerCamelCase : Optional[int] = image_size __lowerCamelCase : Optional[int] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : List[Any] = depths __lowerCamelCase : int = num_heads __lowerCamelCase : Optional[Any] = window_size __lowerCamelCase : Optional[Any] = mlp_ratio __lowerCamelCase : List[str] = qkv_bias __lowerCamelCase : List[str] = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate __lowerCamelCase : Any = hidden_act __lowerCamelCase : Union[str, Any] = use_absolute_embeddings __lowerCamelCase : Any = patch_norm __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : Dict = is_training __lowerCamelCase : Optional[Any] = scope __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[str] = type_sequence_label_size __lowerCamelCase : Dict = encoder_stride __lowerCamelCase : Union[str, Any] = out_features __lowerCamelCase : str = out_indices def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[str] = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Optional[int]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Dict = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __lowerCamelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : str = ['stem'] __lowerCamelCase : Optional[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase : int = False lowerCamelCase : int = False lowerCamelCase : str = False lowerCamelCase : int = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self ) __lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def lowercase_ ( self ) -> int: pass def lowercase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self ) -> Tuple: return def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[str]: __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ ) @unittest.skip('Swin does not use inputs_embeds' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('Swin does not support feedforward chunking' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : str = [*signature.parameters.keys()] __lowerCamelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def lowercase_ ( self ) -> List[Any]: pass def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : int = outputs.hidden_states __lowerCamelCase : Tuple = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # Swin has a different seq_length __lowerCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __lowerCamelCase : Dict = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Optional[int] = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __lowerCamelCase : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __lowerCamelCase : str = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Tuple = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = 0 return t def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ): with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple() def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:' f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has' f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.' ) , ) recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for model_class in self.all_model_classes: __lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) __lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ (unittest.TestCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase : List[str] = MaskFormerSwinConfig def lowercase_ ( self ) -> Tuple: __lowerCamelCase : List[str] = MaskFormerSwinModelTester(self ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ ) backbone.to(SCREAMING_SNAKE_CASE_ ) backbone.eval() __lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.attentions )
13
1
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder A__ : str = """base_with_context""" def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple ) -> Union[str, Any]: __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) __lowerCamelCase : List[Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCAmelCase_ ) for lyr_num, lyr in enumerate(model.encoders ): __lowerCamelCase : int = weights[F'layers_{lyr_num}'] __lowerCamelCase : int = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) __lowerCamelCase : Union[str, Any] = ly_weight['attention'] __lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) __lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) __lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) __lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ) -> Tuple: __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) __lowerCamelCase : List[str] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCAmelCase_ ) for lyr_num, lyr in enumerate(model.encoders ): __lowerCamelCase : Optional[int] = weights[F'layers_{lyr_num}'] __lowerCamelCase : Any = ly_weight['attention'] __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __lowerCamelCase : Any = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) __lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) __lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) __lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) __lowerCamelCase : str = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ) -> Dict: __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) __lowerCamelCase : List[str] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCAmelCase_ ) __lowerCamelCase : List[str] = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __lowerCamelCase : Optional[int] = weights[F'layers_{lyr_num}'] __lowerCamelCase : Dict = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) __lowerCamelCase : Dict = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) __lowerCamelCase : str = ly_weight['self_attention'] __lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __lowerCamelCase : Union[str, Any] = ly_weight['MultiHeadDotProductAttention_0'] __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) __lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) __lowerCamelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) __lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) __lowerCamelCase : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) __lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) __lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) __lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) __lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) __lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def UpperCAmelCase__ ( UpperCAmelCase_ : Any ) -> Optional[Any]: __lowerCamelCase : Optional[Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) __lowerCamelCase : Union[str, Any] = jnp.tree_util.tree_map(onp.array , UpperCAmelCase_ ) __lowerCamelCase : str = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] __lowerCamelCase : Union[str, Any] = os.path.join(args.checkpoint_path , '..' , 'config.gin' ) __lowerCamelCase : str = inference.parse_training_gin_file(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = inference.InferenceModel(args.checkpoint_path , UpperCAmelCase_ ) __lowerCamelCase : int = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) __lowerCamelCase : Tuple = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) __lowerCamelCase : List[str] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) __lowerCamelCase : List[str] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __lowerCamelCase : List[Any] = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , UpperCAmelCase_ ) __lowerCamelCase : Dict = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = load_decoder(ta_checkpoint['target']['decoder'] , UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) __lowerCamelCase : List[str] = SpectrogramDiffusionPipeline( notes_encoder=UpperCAmelCase_ , continuous_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , melgan=UpperCAmelCase_ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": A__ : int = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=f'''{MODEL}/checkpoint_500000''', type=str, required=False, help="""Path to the original jax model checkpoint.""", ) A__ : str = parser.parse_args() main(args)
13
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers A__ : Dict = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None ) -> List[Any]: require_version(deps[pkg] , UpperCAmelCase_ )
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available A__ : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys A__ : List[str] = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
13
1
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=0.999 , UpperCAmelCase_ : Dict="cosine" , ) -> List[str]: if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase_ : Dict ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase_ : Any ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) __lowerCamelCase : List[str] = [] for i in range(UpperCAmelCase_ ): __lowerCamelCase : Any = i / num_diffusion_timesteps __lowerCamelCase : str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase_ ) / alpha_bar_fn(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) return torch.tensor(UpperCAmelCase_ , dtype=torch.floataa ) class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = [e.name for e in KarrasDiffusionSchedulers] lowerCamelCase : List[str] = 2 @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_0_0_8_5 , SCREAMING_SNAKE_CASE_ = 0.0_1_2 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ) -> Dict: if trained_betas is not None: __lowerCamelCase : Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) elif beta_schedule == "linear": __lowerCamelCase : Union[str, Any] = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowerCamelCase : Any = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowerCamelCase : Optional[int] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='cosine' ) elif beta_schedule == "exp": __lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='exp' ) else: raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' ) __lowerCamelCase : Dict = 1.0 - self.betas __lowerCamelCase : Dict = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = use_karras_sigmas def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[str]: if schedule_timesteps is None: __lowerCamelCase : Dict = self.timesteps __lowerCamelCase : Any = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: __lowerCamelCase : List[str] = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0 else: __lowerCamelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep __lowerCamelCase : Union[str, Any] = self._index_counter[timestep_int] return indices[pos].item() @property def lowercase_ ( self ) -> Dict: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> torch.FloatTensor: __lowerCamelCase : Dict = self.index_for_timestep(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = self.sigmas[step_index] __lowerCamelCase : List[Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> str: __lowerCamelCase : Optional[int] = num_inference_steps __lowerCamelCase : List[Any] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __lowerCamelCase : List[Any] = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy() elif self.config.timestep_spacing == "leading": __lowerCamelCase : str = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowerCamelCase : Optional[Any] = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __lowerCamelCase : Any = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowerCamelCase : List[str] = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ ) timesteps -= 1 else: raise ValueError( f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' ) __lowerCamelCase : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __lowerCamelCase : Dict = np.log(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ ) if self.config.use_karras_sigmas: __lowerCamelCase : Any = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps ) __lowerCamelCase : List[str] = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] ) __lowerCamelCase : List[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __lowerCamelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __lowerCamelCase : str = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): # mps does not support float64 __lowerCamelCase : int = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) else: __lowerCamelCase : Optional[int] = timesteps.to(device=SCREAMING_SNAKE_CASE_ ) # empty dt and derivative __lowerCamelCase : Tuple = None __lowerCamelCase : str = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __lowerCamelCase : int = defaultdict(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: # get log sigma __lowerCamelCase : str = np.log(SCREAMING_SNAKE_CASE_ ) # get distribution __lowerCamelCase : Optional[Any] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __lowerCamelCase : int = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __lowerCamelCase : Optional[Any] = low_idx + 1 __lowerCamelCase : Optional[Any] = log_sigmas[low_idx] __lowerCamelCase : Optional[int] = log_sigmas[high_idx] # interpolate sigmas __lowerCamelCase : Union[str, Any] = (low - log_sigma) / (low - high) __lowerCamelCase : Optional[Any] = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 ) # transform interpolation to time range __lowerCamelCase : List[str] = (1 - w) * low_idx + w * high_idx __lowerCamelCase : str = t.reshape(sigma.shape ) return t def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> torch.FloatTensor: __lowerCamelCase : float = in_sigmas[-1].item() __lowerCamelCase : float = in_sigmas[0].item() __lowerCamelCase : int = 7.0 # 7.0 is the value used in the paper __lowerCamelCase : int = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = sigma_min ** (1 / rho) __lowerCamelCase : List[Any] = sigma_max ** (1 / rho) __lowerCamelCase : Union[str, Any] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowercase_ ( self ) -> Any: return self.dt is None def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ) -> Union[SchedulerOutput, Tuple]: __lowerCamelCase : Union[str, Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE_ ) # advance index counter by 1 __lowerCamelCase : Any = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __lowerCamelCase : List[Any] = self.sigmas[step_index] __lowerCamelCase : Optional[int] = self.sigmas[step_index + 1] else: # 2nd order / Heun's method __lowerCamelCase : str = self.sigmas[step_index - 1] __lowerCamelCase : int = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __lowerCamelCase : List[str] = 0 __lowerCamelCase : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __lowerCamelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_next __lowerCamelCase : Optional[int] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __lowerCamelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_next __lowerCamelCase : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __lowerCamelCase : Any = model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: __lowerCamelCase : Any = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __lowerCamelCase : str = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __lowerCamelCase : str = sigma_next - sigma_hat # store for 2nd order step __lowerCamelCase : str = derivative __lowerCamelCase : Any = dt __lowerCamelCase : Any = sample else: # 2. 2nd order / Heun's method __lowerCamelCase : Optional[Any] = (sample - pred_original_sample) / sigma_next __lowerCamelCase : int = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __lowerCamelCase : Optional[int] = self.dt __lowerCamelCase : Dict = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __lowerCamelCase : Dict = None __lowerCamelCase : Any = None __lowerCamelCase : Optional[Any] = None __lowerCamelCase : int = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowerCamelCase : Optional[int] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ): # mps does not support float64 __lowerCamelCase : str = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __lowerCamelCase : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __lowerCamelCase : Dict = self.timesteps.to(original_samples.device ) __lowerCamelCase : Any = timesteps.to(original_samples.device ) __lowerCamelCase : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps] __lowerCamelCase : Optional[int] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __lowerCamelCase : Dict = sigma.unsqueeze(-1 ) __lowerCamelCase : int = original_samples + noise * sigma return noisy_samples def __len__( self ) -> Any: return self.config.num_train_timesteps
13
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""") def UpperCAmelCase__ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) ) A__ : str = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
13
1
'''simple docstring''' import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=5 ) -> Optional[Any]: # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('<mask>' ) == 1 __lowerCamelCase : Dict = torch.tensor(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) ).unsqueeze(0 ) # Batch size 1 __lowerCamelCase : Tuple = model(UpperCAmelCase_ )[0] # The last hidden-state is the first element of the output tuple __lowerCamelCase : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __lowerCamelCase : Optional[Any] = logits[0, masked_index, :] __lowerCamelCase : List[str] = logits.softmax(dim=0 ) __lowerCamelCase , __lowerCamelCase : Tuple = prob.topk(k=UpperCAmelCase_ , dim=0 ) __lowerCamelCase : str = ' '.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(UpperCAmelCase_ ) )] ) __lowerCamelCase : Any = tokenizer.mask_token __lowerCamelCase : Optional[int] = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ): __lowerCamelCase : str = predicted_token_bpe.replace('\u2581' , ' ' ) if " {0}".format(UpperCAmelCase_ ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(' {0}'.format(UpperCAmelCase_ ) , UpperCAmelCase_ ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(UpperCAmelCase_ , UpperCAmelCase_ ), values[index].item(), predicted_token, ) ) return topk_filled_outputs A__ : Dict = CamembertTokenizer.from_pretrained("""camembert-base""") A__ : Union[str, Any] = CamembertForMaskedLM.from_pretrained("""camembert-base""") model.eval() A__ : Optional[Any] = """Le camembert est <mask> :)""" print(fill_mask(masked_input, model, tokenizer, topk=3))
13
'''simple docstring''' from __future__ import annotations import os from collections.abc import Mapping A__ : Optional[Any] = tuple[int, int] class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: __lowerCamelCase : set[int] = vertices __lowerCamelCase : dict[EdgeT, int] = { (min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )): weight for edge, weight in edges.items() } def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __lowerCamelCase : Union[str, Any] = weight def lowercase_ ( self ) -> Graph: __lowerCamelCase : Graph = Graph({min(self.vertices )} , {} ) __lowerCamelCase : EdgeT __lowerCamelCase : int __lowerCamelCase : EdgeT __lowerCamelCase : int while len(subgraph.vertices ) < len(self.vertices ): __lowerCamelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __lowerCamelCase : Optional[int] = edge __lowerCamelCase : List[str] = weight subgraph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return subgraph def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int: __lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) ) __lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : dict[EdgeT, int] = {} __lowerCamelCase : list[str] __lowerCamelCase : int __lowerCamelCase : int with open(UpperCAmelCase_ ) as f: __lowerCamelCase : Any = f.read().strip().split('\n' ) __lowerCamelCase : Any = [line.split(',' ) for line in data] for edgea in range(1 , len(UpperCAmelCase_ ) ): for edgea in range(UpperCAmelCase_ ): if adjaceny_matrix[edgea][edgea] != "-": __lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] ) __lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ ) __lowerCamelCase : Graph = graph.prims_algorithm() __lowerCamelCase : int = sum(graph.edges.values() ) __lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING A__ : Optional[int] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , **SCREAMING_SNAKE_CASE_ ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.' ) requires_backends(self , 'vision' ) self.check_model_type(SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: if "text_queries" in kwargs: __lowerCamelCase : List[Any] = kwargs.pop('text_queries' ) if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image) ): __lowerCamelCase : str = {'image': image, 'candidate_labels': candidate_labels} else: __lowerCamelCase : List[str] = image __lowerCamelCase : Optional[int] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return results def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Union[str, Any] = {} if "threshold" in kwargs: __lowerCamelCase : Any = kwargs['threshold'] if "top_k" in kwargs: __lowerCamelCase : Tuple = kwargs['top_k'] return {}, {}, postprocess_params def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Union[str, Any] = load_image(inputs['image'] ) __lowerCamelCase : int = inputs['candidate_labels'] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = candidate_labels.split(',' ) __lowerCamelCase : int = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) __lowerCamelCase : str = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Optional[Any] = model_inputs.pop('target_size' ) __lowerCamelCase : List[Any] = model_inputs.pop('candidate_label' ) __lowerCamelCase : List[Any] = model_inputs.pop('is_last' ) __lowerCamelCase : str = self.model(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs} return model_outputs def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None ) -> Tuple: __lowerCamelCase : Any = [] for model_output in model_outputs: __lowerCamelCase : Tuple = model_output['candidate_label'] __lowerCamelCase : str = BaseModelOutput(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output['target_size'] )[0] for index in outputs["scores"].nonzero(): __lowerCamelCase : Optional[int] = outputs['scores'][index].item() __lowerCamelCase : List[Any] = self._get_bounding_box(outputs['boxes'][index][0] ) __lowerCamelCase : Dict = {'score': score, 'label': label, 'box': box} results.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x["score"] , reverse=SCREAMING_SNAKE_CASE_ ) if top_k: __lowerCamelCase : List[str] = results[:top_k] return results def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict[str, int]: if self.framework != "pt": raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = box.int().tolist() __lowerCamelCase : Any = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
13
'''simple docstring''' from collections.abc import Generator from math import sin def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: if len(UpperCAmelCase_ ) != 32: raise ValueError('Input must be of length 32' ) __lowerCamelCase : Dict = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bytes: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : Union[str, Any] = format(UpperCAmelCase_ , '08x' )[-8:] __lowerCamelCase : str = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = B'' for char in message: bit_string += format(UpperCAmelCase_ , '08b' ).encode('utf-8' ) __lowerCamelCase : List[str] = format(len(UpperCAmelCase_ ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCAmelCase_ ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> Generator[list[int], None, None]: if len(UpperCAmelCase_ ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(UpperCAmelCase_ ) , 5_12 ): __lowerCamelCase : Any = bit_string[pos : pos + 5_12] __lowerCamelCase : Optional[int] = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : List[Any] = format(UpperCAmelCase_ , '032b' ) __lowerCamelCase : Optional[int] = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCAmelCase_ , 2 ) def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: return (a + b) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = preprocess(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase : Dict = 0x67_45_23_01 __lowerCamelCase : Union[str, Any] = 0xef_cd_ab_89 __lowerCamelCase : Optional[Any] = 0x98_ba_dc_fe __lowerCamelCase : Union[str, Any] = 0x10_32_54_76 __lowerCamelCase : List[str] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCAmelCase_ ): __lowerCamelCase : Dict = aa __lowerCamelCase : Tuple = ba __lowerCamelCase : List[Any] = ca __lowerCamelCase : Dict = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase : List[str] = d ^ (b & (c ^ d)) __lowerCamelCase : Optional[int] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase : Optional[int] = c ^ (d & (b ^ c)) __lowerCamelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase : str = b ^ c ^ d __lowerCamelCase : Any = (3 * i + 5) % 16 else: __lowerCamelCase : Union[str, Any] = c ^ (b | not_aa(UpperCAmelCase_ )) __lowerCamelCase : int = (7 * i) % 16 __lowerCamelCase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase : Optional[Any] = d __lowerCamelCase : Tuple = c __lowerCamelCase : Optional[int] = b __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , left_rotate_aa(UpperCAmelCase_ , shift_amounts[i] ) ) # Add hashed chunk to running total __lowerCamelCase : int = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Dict = reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed A__ : List[str] = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> Dict: assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Union[str, Any]: if args.student_type == "roberta": __lowerCamelCase : List[str] = False elif args.student_type == "gpt2": __lowerCamelCase : Any = False def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> str: if args.student_type == "roberta": __lowerCamelCase : Optional[Any] = False def UpperCAmelCase__ ( ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=UpperCAmelCase_ , choices=['distilbert', 'roberta', 'gpt2'] , required=UpperCAmelCase_ , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=UpperCAmelCase_ , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=UpperCAmelCase_ , help='Temperature for the softmax temperature.' ) parser.add_argument( '--alpha_ce' , default=0.5 , type=UpperCAmelCase_ , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=UpperCAmelCase_ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=UpperCAmelCase_ , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=UpperCAmelCase_ , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=UpperCAmelCase_ , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=UpperCAmelCase_ , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=UpperCAmelCase_ , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=UpperCAmelCase_ , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=UpperCAmelCase_ , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=UpperCAmelCase_ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=UpperCAmelCase_ , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=UpperCAmelCase_ , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=UpperCAmelCase_ , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=UpperCAmelCase_ , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=UpperCAmelCase_ , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=UpperCAmelCase_ , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5e-4 , type=UpperCAmelCase_ , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1e-6 , type=UpperCAmelCase_ , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=UpperCAmelCase_ , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=UpperCAmelCase_ , help='Random initialization range.' ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=UpperCAmelCase_ , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=UpperCAmelCase_ , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=UpperCAmelCase_ , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=UpperCAmelCase_ , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=UpperCAmelCase_ , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=UpperCAmelCase_ , default=40_00 , help='Checkpoint interval.' ) __lowerCamelCase : Optional[int] = parser.parse_args() sanity_checks(UpperCAmelCase_ ) # ARGS # init_gpu_params(UpperCAmelCase_ ) set_seed(UpperCAmelCase_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'Experiment will be dumped and logged in {args.dump_path}' ) # SAVE PARAMS # logger.info(F'Param: {args}' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(UpperCAmelCase_ ) , UpperCAmelCase_ , indent=4 ) git_log(args.dump_path ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = MODEL_CLASSES[args.student_type] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __lowerCamelCase : Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __lowerCamelCase : int = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __lowerCamelCase : List[Any] = tokenizer.all_special_tokens.index(UpperCAmelCase_ ) __lowerCamelCase : List[Any] = tokenizer.all_special_ids[idx] logger.info(F'Special tokens {special_tok_ids}' ) __lowerCamelCase : Dict = special_tok_ids __lowerCamelCase : Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'Loading data from {args.data_file}' ) with open(args.data_file , 'rb' ) as fp: __lowerCamelCase : Any = pickle.load(UpperCAmelCase_ ) if args.mlm: logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' ) with open(args.token_counts , 'rb' ) as fp: __lowerCamelCase : Tuple = pickle.load(UpperCAmelCase_ ) __lowerCamelCase : Tuple = np.maximum(UpperCAmelCase_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __lowerCamelCase : Any = 0.0 # do not predict special tokens __lowerCamelCase : List[Any] = torch.from_numpy(UpperCAmelCase_ ) else: __lowerCamelCase : Optional[Any] = None __lowerCamelCase : Any = LmSeqsDataset(params=UpperCAmelCase_ , data=UpperCAmelCase_ ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'Loading student config from {args.student_config}' ) __lowerCamelCase : List[Any] = student_config_class.from_pretrained(args.student_config ) __lowerCamelCase : Optional[Any] = True if args.student_pretrained_weights is not None: logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' ) __lowerCamelCase : str = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCAmelCase_ ) else: __lowerCamelCase : Optional[Any] = student_model_class(UpperCAmelCase_ ) if args.n_gpu > 0: student.to(F'cuda:{args.local_rank}' ) logger.info('Student loaded.' ) # TEACHER # __lowerCamelCase : Tuple = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCAmelCase_ ) if args.n_gpu > 0: teacher.to(F'cuda:{args.local_rank}' ) logger.info(F'Teacher loaded from {args.teacher_name}.' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(UpperCAmelCase_ , UpperCAmelCase_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(UpperCAmelCase_ , UpperCAmelCase_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __lowerCamelCase : Dict = Distiller( params=UpperCAmelCase_ , dataset=UpperCAmelCase_ , token_probs=UpperCAmelCase_ , student=UpperCAmelCase_ , teacher=UpperCAmelCase_ ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
13
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Dict = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = 'rwkv' lowerCamelCase : Any = {'max_position_embeddings': 'context_length'} def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : Tuple = context_length __lowerCamelCase : str = hidden_size __lowerCamelCase : List[str] = num_hidden_layers __lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCamelCase : Optional[Any] = layer_norm_epsilon __lowerCamelCase : int = rescale_every __lowerCamelCase : Tuple = use_cache __lowerCamelCase : int = bos_token_id __lowerCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""") def UpperCAmelCase__ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) ) A__ : str = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 10_00 ) -> int: __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from __future__ import annotations from collections.abc import Callable A__ : str = list[list[float | int]] def UpperCAmelCase__ ( UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ) -> Matrix: __lowerCamelCase : int = len(UpperCAmelCase_ ) __lowerCamelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase_ )] __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : float for row in range(UpperCAmelCase_ ): for col in range(UpperCAmelCase_ ): __lowerCamelCase : Union[str, Any] = matrix[row][col] __lowerCamelCase : Optional[Any] = vector[row][0] __lowerCamelCase : int = 0 __lowerCamelCase : Optional[Any] = 0 while row < size and col < size: # pivoting __lowerCamelCase : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase_ , UpperCAmelCase_ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __lowerCamelCase , __lowerCamelCase : Optional[int] = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase_ ): __lowerCamelCase : List[Any] = augmented[rowa][col] / augmented[row][col] __lowerCamelCase : Dict = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase_ ): for row in range(UpperCAmelCase_ ): __lowerCamelCase : Optional[int] = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase_ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase_ ) ] def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> Callable[[int], int]: __lowerCamelCase : int = len(UpperCAmelCase_ ) __lowerCamelCase : Matrix = [[0 for _ in range(UpperCAmelCase_ )] for _ in range(UpperCAmelCase_ )] __lowerCamelCase : Matrix = [[0] for _ in range(UpperCAmelCase_ )] __lowerCamelCase : Matrix __lowerCamelCase : int __lowerCamelCase : int __lowerCamelCase : int for x_val, y_val in enumerate(UpperCAmelCase_ ): for col in range(UpperCAmelCase_ ): __lowerCamelCase : str = (x_val + 1) ** (size - col - 1) __lowerCamelCase : str = y_val __lowerCamelCase : Dict = solve(UpperCAmelCase_ , UpperCAmelCase_ ) def interpolated_func(UpperCAmelCase_ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase_ ) ) return interpolated_func def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def UpperCAmelCase__ ( UpperCAmelCase_ : Callable[[int], int] = question_function , UpperCAmelCase_ : int = 10 ) -> int: __lowerCamelCase : list[int] = [func(UpperCAmelCase_ ) for x_val in range(1 , order + 1 )] __lowerCamelCase : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __lowerCamelCase : int = 0 __lowerCamelCase : Callable[[int], int] __lowerCamelCase : int for poly in polynomials: __lowerCamelCase : Optional[Any] = 1 while func(UpperCAmelCase_ ) == poly(UpperCAmelCase_ ): x_val += 1 ret += poly(UpperCAmelCase_ ) return ret if __name__ == "__main__": print(f'''{solution() = }''')
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Dict = XGLMConfig lowerCamelCase : List[str] = {} lowerCamelCase : Union[str, Any] = 'gelu' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Any: __lowerCamelCase : int = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : Optional[Any] = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : str = use_input_mask __lowerCamelCase : Dict = use_labels __lowerCamelCase : Union[str, Any] = vocab_size __lowerCamelCase : List[Any] = d_model __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : Optional[Any] = ffn_dim __lowerCamelCase : List[Any] = activation_function __lowerCamelCase : List[Any] = activation_dropout __lowerCamelCase : List[Any] = attention_dropout __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : int = None __lowerCamelCase : int = 0 __lowerCamelCase : Tuple = 2 __lowerCamelCase : Tuple = 1 def lowercase_ ( self ) -> Any: return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __lowerCamelCase : Optional[int] = None if self.use_input_mask: __lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase : str = self.get_config() __lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase_ ( self ) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> str: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : str = config_and_inputs __lowerCamelCase : Union[str, Any] = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase : Any = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase : List[Any] = False lowerCamelCase : Dict = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : str = TFXGLMModelTester(self ) __lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 ) def lowercase_ ( self ) -> Dict: self.config_tester.run_common_tests() @slow def lowercase_ ( self ) -> Optional[int]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Optional[Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def lowercase_ ( self ) -> Any: super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self , SCREAMING_SNAKE_CASE_=True ) -> List[str]: __lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[int] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on __lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __lowerCamelCase : List[Any] = tokenizer('Today is a nice day and' , return_tensors='tf' ) __lowerCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __lowerCamelCase : Tuple = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , seed=[7, 0] ) __lowerCamelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = 'left' # use different length sentences to test batching __lowerCamelCase : Any = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : Any = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
13
1
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : Any = [] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: return self.node_position[vertex] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = pos def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase : str = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase : Optional[Any] = 2 * start + 1 else: __lowerCamelCase : int = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase : int = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase : str = temp, tempa __lowerCamelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ ) self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = position[index] while index != 0: __lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase : Union[str, Any] = heap[parent] __lowerCamelCase : Any = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Tuple = val __lowerCamelCase : List[str] = temp self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) break __lowerCamelCase : Tuple = parent else: __lowerCamelCase : Union[str, Any] = val __lowerCamelCase : Tuple = temp self.set_position(SCREAMING_SNAKE_CASE_ , 0 ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Any = positions[0] __lowerCamelCase : Union[str, Any] = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return temp def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str: __lowerCamelCase : List[Any] = Heap() __lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ ) __lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase : Tuple = [] for vertex in range(len(UpperCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase_ ) heap.node_position.append(UpperCAmelCase_ ) __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 1 __lowerCamelCase : str = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase : Any = 0 __lowerCamelCase : Any = distance heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(1 , len(UpperCAmelCase_ ) ): __lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase : Union[str, Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase_ )] ): __lowerCamelCase : Dict = distance heap.bottom_to_top( UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A__ : Tuple = int(input("""Enter number of edges: """).strip()) A__ : str = defaultdict(list) for _ in range(edges_number): A__ : Optional[int] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] = logging.get_logger(__name__) # TODO Update this A__ : Tuple = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'esm' def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : str = num_hidden_layers __lowerCamelCase : List[str] = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Optional[int] = max_position_embeddings __lowerCamelCase : str = initializer_range __lowerCamelCase : Optional[int] = layer_norm_eps __lowerCamelCase : List[str] = position_embedding_type __lowerCamelCase : int = use_cache __lowerCamelCase : Optional[Any] = emb_layer_norm_before __lowerCamelCase : Optional[Any] = token_dropout __lowerCamelCase : str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) __lowerCamelCase : Dict = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) __lowerCamelCase : List[str] = get_default_vocab_list() else: __lowerCamelCase : Optional[Any] = vocab_list else: __lowerCamelCase : Dict = None __lowerCamelCase : Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' ) def lowercase_ ( self ) -> Any: __lowerCamelCase : Any = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : int = self.esmfold_config.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = None lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : float = 0 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : int = 1_2_8 lowerCamelCase : "TrunkConfig" = None def lowercase_ ( self ) -> Any: if self.trunk is None: __lowerCamelCase : List[str] = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = TrunkConfig(**self.trunk ) def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[int] = asdict(self ) __lowerCamelCase : str = self.trunk.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 4_8 lowerCamelCase : int = 1_0_2_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : float = 0 lowerCamelCase : float = 0 lowerCamelCase : bool = False lowerCamelCase : int = 4 lowerCamelCase : Optional[int] = 1_2_8 lowerCamelCase : "StructureModuleConfig" = None def lowercase_ ( self ) -> Optional[int]: if self.structure_module is None: __lowerCamelCase : Dict = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) __lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width __lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[str] = asdict(self ) __lowerCamelCase : int = self.structure_module.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 3_8_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_6 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_2 lowerCamelCase : int = 4 lowerCamelCase : int = 8 lowerCamelCase : float = 0.1 lowerCamelCase : int = 8 lowerCamelCase : int = 1 lowerCamelCase : int = 2 lowerCamelCase : int = 7 lowerCamelCase : int = 1_0 lowerCamelCase : float = 1e-8 lowerCamelCase : float = 1e5 def lowercase_ ( self ) -> Any: return asdict(self ) def UpperCAmelCase__ ( ) -> Optional[Any]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
13
1
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __lowerCamelCase : Union[str, Any] = F'Input value of [number={number}] must be an integer' raise TypeError(UpperCAmelCase_ ) if number < 1: __lowerCamelCase : Tuple = F'Input value of [number={number}] must be > 0' raise ValueError(UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = 1 for i in range(1 , UpperCAmelCase_ ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' A__ : dict[tuple[int, int, int], int] = {} def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on __lowerCamelCase : List[Any] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one __lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 __lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter __lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 ) __lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime __lowerCamelCase : Union[str, Any] = prizestrings return prizestrings def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int: return _calculate(UpperCAmelCase_ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
13
1
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : int = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } A__ : List[Any] = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } A__ : Tuple = {"""facebook/blenderbot_small-90M""": 512} def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> List[Any]: __lowerCamelCase : Dict = set() __lowerCamelCase : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase : Optional[Any] = char __lowerCamelCase : str = set(UpperCAmelCase_ ) return pairs class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = VOCAB_FILES_NAMES lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="__start__" , SCREAMING_SNAKE_CASE_="__end__" , SCREAMING_SNAKE_CASE_="__unk__" , SCREAMING_SNAKE_CASE_="__null__" , **SCREAMING_SNAKE_CASE_ , ) -> int: super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle: __lowerCamelCase : Tuple = json.load(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle: __lowerCamelCase : Dict = merges_handle.read().split('\n' )[1:-1] __lowerCamelCase : int = [tuple(merge.split() ) for merge in merges] __lowerCamelCase : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) __lowerCamelCase : Any = {} @property def lowercase_ ( self ) -> int: return len(self.encoder ) def lowercase_ ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase : Dict = re.sub('([.,!?()])' , r' \1' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = re.sub('(\')' , r' \1 ' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = re.sub(r'\s{2,}' , ' ' , SCREAMING_SNAKE_CASE_ ) if "\n" in token: __lowerCamelCase : List[str] = token.replace('\n' , ' __newln__' ) __lowerCamelCase : Dict = token.split(' ' ) __lowerCamelCase : List[str] = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE_ ): continue __lowerCamelCase : Optional[int] = token.lower() __lowerCamelCase : List[str] = tuple(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) __lowerCamelCase : List[Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: words.append(SCREAMING_SNAKE_CASE_ ) continue while True: __lowerCamelCase : List[Any] = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase : Optional[int] = bigram __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: __lowerCamelCase : Any = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) new_word.extend(word[i:j] ) __lowerCamelCase : Optional[Any] = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase : str = tuple(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: __lowerCamelCase : List[str] = get_pairs(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = '@@ '.join(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = word[:-4] __lowerCamelCase : Union[str, Any] = word words.append(SCREAMING_SNAKE_CASE_ ) return " ".join(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Optional[Any] = [] __lowerCamelCase : List[str] = re.findall(r'\S+\n?' , SCREAMING_SNAKE_CASE_ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) ) ) return split_tokens def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : List[str] = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = ' '.join(SCREAMING_SNAKE_CASE_ ).replace('@@ ' , '' ).strip() return out_string def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowerCamelCase : Dict = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase : Dict = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' ) __lowerCamelCase : int = 0 with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!' ) __lowerCamelCase : int = token_index writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' ) index += 1 return vocab_file, merge_file
13
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str lowerCamelCase : Optional[str] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None def lowercase_ ( self ) -> List[str]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> Any: return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def lowercase_ ( self ) -> int: return self.major, self.minor, self.patch def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return Version(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return other raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' ) def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: try: __lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) return self.tuple < other.tuple def __hash__( self ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase_ ( self ) -> str: return self.version_str def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str: __lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict: return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
13
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Dict = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = 'rwkv' lowerCamelCase : Any = {'max_position_embeddings': 'context_length'} def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : Tuple = context_length __lowerCamelCase : str = hidden_size __lowerCamelCase : List[str] = num_hidden_layers __lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCamelCase : Optional[Any] = layer_norm_epsilon __lowerCamelCase : int = rescale_every __lowerCamelCase : Tuple = use_cache __lowerCamelCase : int = bos_token_id __lowerCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : Any = [] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: return self.node_position[vertex] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = pos def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase : str = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase : Optional[Any] = 2 * start + 1 else: __lowerCamelCase : int = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase : int = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase : str = temp, tempa __lowerCamelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ ) self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = position[index] while index != 0: __lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase : Union[str, Any] = heap[parent] __lowerCamelCase : Any = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Tuple = val __lowerCamelCase : List[str] = temp self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) break __lowerCamelCase : Tuple = parent else: __lowerCamelCase : Union[str, Any] = val __lowerCamelCase : Tuple = temp self.set_position(SCREAMING_SNAKE_CASE_ , 0 ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Any = positions[0] __lowerCamelCase : Union[str, Any] = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return temp def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str: __lowerCamelCase : List[Any] = Heap() __lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ ) __lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase : Tuple = [] for vertex in range(len(UpperCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase_ ) heap.node_position.append(UpperCAmelCase_ ) __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 1 __lowerCamelCase : str = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase : Any = 0 __lowerCamelCase : Any = distance heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(1 , len(UpperCAmelCase_ ) ): __lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase : Union[str, Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase_ )] ): __lowerCamelCase : Dict = distance heap.bottom_to_top( UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A__ : Tuple = int(input("""Enter number of edges: """).strip()) A__ : str = defaultdict(list) for _ in range(edges_number): A__ : Optional[int] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int: __lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6 __lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : Any = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias'] @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 5_02_57 , SCREAMING_SNAKE_CASE_ = 10_24 , SCREAMING_SNAKE_CASE_ = 7_68 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "gelu_new" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 1E-5 , SCREAMING_SNAKE_CASE_ = 0.0_2 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , ) -> Tuple: super().__init__() __lowerCamelCase : int = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and' f' `n_embd`: {n_embd} are not equal.' ) __lowerCamelCase : List[Any] = prefix_inner_dim __lowerCamelCase : Any = prefix_hidden_dim __lowerCamelCase : Dict = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCamelCase : Dict = ( nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCamelCase : str = GPTaConfig( vocab_size=SCREAMING_SNAKE_CASE_ , n_positions=SCREAMING_SNAKE_CASE_ , n_embd=SCREAMING_SNAKE_CASE_ , n_layer=SCREAMING_SNAKE_CASE_ , n_head=SCREAMING_SNAKE_CASE_ , n_inner=SCREAMING_SNAKE_CASE_ , activation_function=SCREAMING_SNAKE_CASE_ , resid_pdrop=SCREAMING_SNAKE_CASE_ , embd_pdrop=SCREAMING_SNAKE_CASE_ , attn_pdrop=SCREAMING_SNAKE_CASE_ , layer_norm_epsilon=SCREAMING_SNAKE_CASE_ , initializer_range=SCREAMING_SNAKE_CASE_ , scale_attn_weights=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE_ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Dict = GPTaLMHeadModel(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> List[str]: __lowerCamelCase : Any = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = self.encode_prefix(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = self.decode_prefix(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __lowerCamelCase : Union[str, Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __lowerCamelCase : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) __lowerCamelCase : Union[str, Any] = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor: return torch.zeros(SCREAMING_SNAKE_CASE_ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: return self.encode_prefix(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : int = torch.split(SCREAMING_SNAKE_CASE_ , 1 , dim=0 ) __lowerCamelCase : List[str] = [] __lowerCamelCase : Tuple = [] for feature in features: __lowerCamelCase : List[str] = self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE_ ) ) # back to the clip feature # Only support beam search for now __lowerCamelCase , __lowerCamelCase : int = self.generate_beam( input_embeds=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __lowerCamelCase : Optional[Any] = torch.stack(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = torch.stack(SCREAMING_SNAKE_CASE_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = 5 , SCREAMING_SNAKE_CASE_ = 67 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Union[str, Any]: __lowerCamelCase : Dict = eos_token_id __lowerCamelCase : Dict = None __lowerCamelCase : List[str] = None __lowerCamelCase : Tuple = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.int ) __lowerCamelCase : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.bool ) if input_embeds is not None: __lowerCamelCase : Any = input_embeds else: __lowerCamelCase : Dict = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Tuple = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = outputs.logits __lowerCamelCase : Tuple = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __lowerCamelCase : Optional[int] = logits.softmax(-1 ).log() if scores is None: __lowerCamelCase , __lowerCamelCase : Dict = logits.topk(SCREAMING_SNAKE_CASE_ , -1 ) __lowerCamelCase : Union[str, Any] = generated.expand(SCREAMING_SNAKE_CASE_ , *generated.shape[1:] ) __lowerCamelCase , __lowerCamelCase : Union[str, Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __lowerCamelCase : Optional[int] = next_tokens else: __lowerCamelCase : int = tokens.expand(SCREAMING_SNAKE_CASE_ , *tokens.shape[1:] ) __lowerCamelCase : Any = torch.cat((tokens, next_tokens) , dim=1 ) else: __lowerCamelCase : Optional[int] = -float(np.inf ) __lowerCamelCase : Union[str, Any] = 0 __lowerCamelCase : str = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __lowerCamelCase : int = scores_sum / seq_lengths[:, None] __lowerCamelCase , __lowerCamelCase : Optional[int] = scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE_ , -1 ) __lowerCamelCase : int = next_tokens // scores_sum.shape[1] __lowerCamelCase : List[str] = seq_lengths[next_tokens_source] __lowerCamelCase : Optional[int] = next_tokens % scores_sum.shape[1] __lowerCamelCase : Tuple = next_tokens.unsqueeze(1 ) __lowerCamelCase : List[str] = tokens[next_tokens_source] __lowerCamelCase : Tuple = torch.cat((tokens, next_tokens) , dim=1 ) __lowerCamelCase : int = generated[next_tokens_source] __lowerCamelCase : Optional[Any] = scores_sum_average * seq_lengths __lowerCamelCase : str = is_stopped[next_tokens_source] __lowerCamelCase : List[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __lowerCamelCase : Any = torch.cat((generated, next_token_embed) , dim=1 ) __lowerCamelCase : Dict = is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE_ ).squeeze() if is_stopped.all(): break __lowerCamelCase : Tuple = scores / seq_lengths __lowerCamelCase : Tuple = scores.argsort(descending=SCREAMING_SNAKE_CASE_ ) # tokens tensors are already padded to max_seq_length __lowerCamelCase : Union[str, Any] = [tokens[i] for i in order] __lowerCamelCase : List[Any] = torch.stack(SCREAMING_SNAKE_CASE_ , dim=0 ) __lowerCamelCase : int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
13
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : int = image_size __lowerCamelCase : List[str] = patch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : Any = is_training __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : str = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 1 def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Optional[int] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (self.image_size, self.image_size) __lowerCamelCase : str = (self.patch_size, self.patch_size) __lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = self.type_sequence_label_size __lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowercase_ ( self ) -> None: __lowerCamelCase : str = FlaxViTModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[str] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer A__ : str = ["""bert-base-uncased""", """bert-base-cased"""] A__ : Union[str, Any] = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class UpperCAmelCase_ (tf.keras.Model ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: super().__init__() __lowerCamelCase : Union[str, Any] = tokenizer __lowerCamelCase : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = TFAutoModel.from_config(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : str = self.tokenizer(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = self.bert(**SCREAMING_SNAKE_CASE_ ) return out["pooler_output"] @require_tf @require_tensorflow_text class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Any: super().setUp() __lowerCamelCase : List[str] = [ BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false __lowerCamelCase : Union[str, Any] = [TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , use_fast_bert_tokenizer=SCREAMING_SNAKE_CASE_ ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __lowerCamelCase : Optional[Any] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] __lowerCamelCase : str = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def lowercase_ ( self ) -> Optional[int]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): __lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding='longest' ) __lowerCamelCase : Any = tf_tokenizer(SCREAMING_SNAKE_CASE_ ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def lowercase_ ( self ) -> List[str]: for tf_tokenizer in self.tf_tokenizers: __lowerCamelCase : Any = tf_tokenizer(self.paired_sentences ) __lowerCamelCase : int = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def lowercase_ ( self ) -> int: for tf_tokenizer in self.tf_tokenizers: __lowerCamelCase : Optional[int] = tf.function(SCREAMING_SNAKE_CASE_ ) for test_inputs in (self.test_sentences, self.paired_sentences): __lowerCamelCase : Dict = tf.constant(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = compiled_tokenizer(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = tf_tokenizer(SCREAMING_SNAKE_CASE_ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def lowercase_ ( self ) -> Any: for tf_tokenizer in self.tf_tokenizers: __lowerCamelCase : Union[str, Any] = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = tf.convert_to_tensor(self.test_sentences ) __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __lowerCamelCase : List[Any] = Path(SCREAMING_SNAKE_CASE_ ) / 'saved.model' model.save(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = tf.keras.models.load_model(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = loaded_model(SCREAMING_SNAKE_CASE_ ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
13
'''simple docstring''' import argparse A__ : Optional[Any] = """docs/source/_static/js/custom.js""" def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int: with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f: __lowerCamelCase : Dict = f.readlines() __lowerCamelCase : Tuple = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 __lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += F' "v{version}": "v{version}",\n' with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase_ ) if __name__ == "__main__": A__ : str = argparse.ArgumentParser() parser.add_argument("""--version""", help="""Release version.""") A__ : Any = parser.parse_args() update_custom_js(args.version)
13
1
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> float: if not nums: # Makes sure that the list is not empty raise ValueError('List is empty' ) __lowerCamelCase : Union[str, Any] = sum(UpperCAmelCase_ ) / len(UpperCAmelCase_ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(UpperCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape __lowerCamelCase : Dict = jax.image.resize( SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) __lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> List[str]: __lowerCamelCase : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int = None lowerCamelCase : float = 0.0 lowerCamelCase : bool = None lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels __lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : Tuple = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype ) __lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : int = nn.Dropout(self.dropout_prob ) __lowerCamelCase : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __lowerCamelCase : List[Any] = None if use_nin_shortcut: __lowerCamelCase : Any = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple: __lowerCamelCase : List[Any] = hidden_states __lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 ) __lowerCamelCase : Optional[int] = hidden_states + temb __lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ ) if self.conv_shortcut is not None: __lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ ) return hidden_states + residual
13
1
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] = logging.get_logger(__name__) # TODO Update this A__ : Tuple = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'esm' def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : str = num_hidden_layers __lowerCamelCase : List[str] = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Optional[int] = max_position_embeddings __lowerCamelCase : str = initializer_range __lowerCamelCase : Optional[int] = layer_norm_eps __lowerCamelCase : List[str] = position_embedding_type __lowerCamelCase : int = use_cache __lowerCamelCase : Optional[Any] = emb_layer_norm_before __lowerCamelCase : Optional[Any] = token_dropout __lowerCamelCase : str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) __lowerCamelCase : Dict = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) __lowerCamelCase : List[str] = get_default_vocab_list() else: __lowerCamelCase : Optional[Any] = vocab_list else: __lowerCamelCase : Dict = None __lowerCamelCase : Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' ) def lowercase_ ( self ) -> Any: __lowerCamelCase : Any = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : int = self.esmfold_config.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = None lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : float = 0 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : int = 1_2_8 lowerCamelCase : "TrunkConfig" = None def lowercase_ ( self ) -> Any: if self.trunk is None: __lowerCamelCase : List[str] = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = TrunkConfig(**self.trunk ) def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[int] = asdict(self ) __lowerCamelCase : str = self.trunk.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 4_8 lowerCamelCase : int = 1_0_2_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : float = 0 lowerCamelCase : float = 0 lowerCamelCase : bool = False lowerCamelCase : int = 4 lowerCamelCase : Optional[int] = 1_2_8 lowerCamelCase : "StructureModuleConfig" = None def lowercase_ ( self ) -> Optional[int]: if self.structure_module is None: __lowerCamelCase : Dict = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) __lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width __lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[str] = asdict(self ) __lowerCamelCase : int = self.structure_module.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 3_8_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_6 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_2 lowerCamelCase : int = 4 lowerCamelCase : int = 8 lowerCamelCase : float = 0.1 lowerCamelCase : int = 8 lowerCamelCase : int = 1 lowerCamelCase : int = 2 lowerCamelCase : int = 7 lowerCamelCase : int = 1_0 lowerCamelCase : float = 1e-8 lowerCamelCase : float = 1e5 def lowercase_ ( self ) -> Any: return asdict(self ) def UpperCAmelCase__ ( ) -> Optional[Any]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
13
'''simple docstring''' from __future__ import annotations A__ : int = 10 def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> list[int]: __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Any = max(UpperCAmelCase_ ) while placement <= max_digit: # declare and initialize empty buckets __lowerCamelCase : list[list] = [[] for _ in range(UpperCAmelCase_ )] # split list_of_ints between the buckets for i in list_of_ints: __lowerCamelCase : List[Any] = int((i / placement) % RADIX ) buckets[tmp].append(UpperCAmelCase_ ) # put each buckets' contents into list_of_ints __lowerCamelCase : Tuple = 0 for b in range(UpperCAmelCase_ ): for i in buckets[b]: __lowerCamelCase : List[Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , SCREAMING_SNAKE_CASE_=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , SCREAMING_SNAKE_CASE_=True , ) -> Optional[Any]: __lowerCamelCase : int = size if size is not None else {'height': 2_24, 'width': 2_24} __lowerCamelCase : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18} __lowerCamelCase : Tuple = parent __lowerCamelCase : int = batch_size __lowerCamelCase : Dict = num_channels __lowerCamelCase : Any = image_size __lowerCamelCase : Dict = min_resolution __lowerCamelCase : List[Any] = max_resolution __lowerCamelCase : str = do_resize __lowerCamelCase : Union[str, Any] = size __lowerCamelCase : Any = do_center_crop __lowerCamelCase : List[str] = crop_size __lowerCamelCase : Union[str, Any] = do_normalize __lowerCamelCase : Optional[int] = image_mean __lowerCamelCase : int = image_std __lowerCamelCase : List[str] = do_convert_rgb def lowercase_ ( self ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowercase_ ( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> Dict: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __lowerCamelCase : Dict = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __lowerCamelCase : int = [] for i in range(self.batch_size ): __lowerCamelCase , __lowerCamelCase : Tuple = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __lowerCamelCase : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs] if torchify: __lowerCamelCase : Dict = [torch.from_numpy(SCREAMING_SNAKE_CASE_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Dict = ChineseCLIPImageProcessor if is_vision_available() else None def lowercase_ ( self ) -> int: __lowerCamelCase : Any = ChineseCLIPImageProcessingTester(self , do_center_crop=SCREAMING_SNAKE_CASE_ ) @property def lowercase_ ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self ) -> List[str]: __lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_center_crop' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'center_crop' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_normalize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_mean' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_std' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_convert_rgb' ) ) def lowercase_ ( self ) -> List[str]: __lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) __lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def lowercase_ ( self ) -> str: pass def lowercase_ ( self ) -> str: # Initialize image_processing __lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input __lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase : Tuple = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowercase_ ( self ) -> Tuple: # Initialize image_processing __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input __lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase : int = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowercase_ ( self ) -> int: # Initialize image_processing __lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input __lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase : List[str] = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : str = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = 3 @property def lowercase_ ( self ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_center_crop' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'center_crop' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_normalize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_mean' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_std' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_convert_rgb' ) ) def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> Dict: # Initialize image_processing __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input __lowerCamelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
13
'''simple docstring''' from collections import defaultdict from math import gcd def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_50_00_00 ) -> int: __lowerCamelCase : defaultdict = defaultdict(UpperCAmelCase_ ) __lowerCamelCase : Any = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , UpperCAmelCase_ , 2 ): if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) > 1: continue __lowerCamelCase : Any = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(UpperCAmelCase_ , limit + 1 , UpperCAmelCase_ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 / 2_55 , SCREAMING_SNAKE_CASE_=True , ) -> int: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : int = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33} __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : str = num_channels __lowerCamelCase : Union[str, Any] = min_resolution __lowerCamelCase : int = max_resolution __lowerCamelCase : str = do_resize __lowerCamelCase : Tuple = size __lowerCamelCase : str = do_normalize __lowerCamelCase : Optional[int] = image_mean __lowerCamelCase : int = image_std __lowerCamelCase : Union[str, Any] = do_rescale __lowerCamelCase : Optional[Any] = rescale_factor __lowerCamelCase : Optional[int] = do_pad def lowercase_ ( self ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Any: if not batched: __lowerCamelCase : str = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE_ , Image.Image ): __lowerCamelCase , __lowerCamelCase : Union[str, Any] = image.size else: __lowerCamelCase , __lowerCamelCase : Optional[int] = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : List[Any] = int(self.size['shortest_edge'] * h / w ) __lowerCamelCase : str = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Optional[int] = self.size['shortest_edge'] __lowerCamelCase : List[str] = int(self.size['shortest_edge'] * w / h ) else: __lowerCamelCase : Any = self.size['shortest_edge'] __lowerCamelCase : List[str] = self.size['shortest_edge'] else: __lowerCamelCase : Optional[Any] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCamelCase : Any = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[0] )[0] __lowerCamelCase : Any = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self ) -> str: __lowerCamelCase : Dict = DetaImageProcessingTester(self ) @property def lowercase_ ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self ) -> str: __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_mean' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_std' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_normalize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_rescale' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_pad' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) ) def lowercase_ ( self ) -> int: __lowerCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[str]: pass def lowercase_ ( self ) -> Union[str, Any]: # Initialize image_processing __lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input __lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCamelCase , __lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self ) -> Tuple: # Initialize image_processing __lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase : Tuple = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values __lowerCamelCase , __lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self ) -> Optional[Any]: # Initialize image_processing __lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input __lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values __lowerCamelCase , __lowerCamelCase : str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self ) -> List[Any]: # prepare image and target __lowerCamelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: __lowerCamelCase : List[Any] = json.loads(f.read() ) __lowerCamelCase : List[str] = {'image_id': 3_97_69, 'annotations': target} # encode them __lowerCamelCase : Union[str, Any] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) # verify pixel values __lowerCamelCase : Optional[int] = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE_ ) ) # verify boxes __lowerCamelCase : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) # verify image_id __lowerCamelCase : Union[str, Any] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE_ ) ) # verify is_crowd __lowerCamelCase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE_ ) ) # verify class_labels __lowerCamelCase : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE_ ) ) # verify orig_size __lowerCamelCase : List[Any] = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE_ ) ) # verify size __lowerCamelCase : Optional[Any] = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE_ ) ) @slow def lowercase_ ( self ) -> Optional[Any]: # prepare image, target and masks_path __lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: __lowerCamelCase : Any = json.loads(f.read() ) __lowerCamelCase : int = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target} __lowerCamelCase : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them __lowerCamelCase : Union[str, Any] = DetaImageProcessor(format='coco_panoptic' ) __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , masks_path=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE_ ) ) # verify boxes __lowerCamelCase : int = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) # verify image_id __lowerCamelCase : Optional[int] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE_ ) ) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE_ ) ) # verify class_labels __lowerCamelCase : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE_ ) ) # verify masks __lowerCamelCase : Any = 82_28_73 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE_ ) # verify orig_size __lowerCamelCase : Tuple = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE_ ) ) # verify size __lowerCamelCase : Tuple = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE_ ) )
13
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer A__ : str = logging.get_logger(__name__) A__ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} A__ : Tuple = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } A__ : str = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } A__ : Tuple = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION lowerCamelCase : Dict = RoFormerTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case or pre_tok_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents ): __lowerCamelCase : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) ) __lowerCamelCase : Union[str, Any] = do_lower_case __lowerCamelCase : str = strip_accents __lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = do_lower_case def __getstate__( self ) -> List[str]: __lowerCamelCase : Union[str, Any] = self.__dict__.copy() __lowerCamelCase : Dict = BertPreTokenizer() return state def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Optional[int] = d __lowerCamelCase : List[Any] = self.__dict__['_tokenizer'].get_vocab() __lowerCamelCase : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE_ ) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: __lowerCamelCase : List[str] = [self.sep_token_id] __lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: __lowerCamelCase : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Any: __lowerCamelCase : Tuple = BertPreTokenizer() return super().save_pretrained(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) A__ : List[Any] = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : List[str] = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys A__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer A__ : int = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast A__ : Dict = TaTokenizerFast A__ : Dict = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys A__ : Union[str, Any] = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
13
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> Dict: __lowerCamelCase : Any = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> Optional[Any]: __lowerCamelCase : Optional[Any] = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: __lowerCamelCase : List[str] = s_dict.pop(UpperCAmelCase_ ) elif "subsample" in key: __lowerCamelCase : Optional[int] = s_dict.pop(UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> int: __lowerCamelCase , __lowerCamelCase : List[str] = emb.weight.shape __lowerCamelCase : List[str] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ ) __lowerCamelCase : int = emb.weight.data return lin_layer def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Any ) -> List[Any]: __lowerCamelCase : Tuple = torch.load(UpperCAmelCase_ , map_location='cpu' ) __lowerCamelCase : Any = mam_aaa['args'] __lowerCamelCase : Any = mam_aaa['model'] __lowerCamelCase : Optional[Any] = state_dict['decoder.output_projection.weight'] remove_ignore_keys_(UpperCAmelCase_ ) rename_keys(UpperCAmelCase_ ) __lowerCamelCase : Tuple = state_dict['decoder.embed_tokens.weight'].shape[0] __lowerCamelCase : Optional[int] = args.share_decoder_input_output_embed __lowerCamelCase : Optional[int] = [int(UpperCAmelCase_ ) for i in args.conv_kernel_sizes.split(',' )] __lowerCamelCase : int = SpeechaTextConfig( vocab_size=UpperCAmelCase_ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(UpperCAmelCase_ ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCAmelCase_ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCAmelCase_ , num_beams=5 , max_length=2_00 , use_cache=UpperCAmelCase_ , decoder_start_token_id=2 , early_stopping=UpperCAmelCase_ , ) __lowerCamelCase : str = SpeechaTextForConditionalGeneration(UpperCAmelCase_ ) __lowerCamelCase , __lowerCamelCase : Optional[int] = model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0 and not set(UpperCAmelCase_ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F' but all the following weights are missing {missing}' ) if tie_embeds: __lowerCamelCase : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __lowerCamelCase : int = lm_head_weights model.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": A__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") A__ : List[str] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
13
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase_ (tf.keras.optimizers.schedules.LearningRateSchedule ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Any: super().__init__() __lowerCamelCase : Optional[Any] = initial_learning_rate __lowerCamelCase : Optional[Any] = warmup_steps __lowerCamelCase : Union[str, Any] = power __lowerCamelCase : Optional[int] = decay_schedule_fn __lowerCamelCase : Any = name def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCamelCase : str = tf.cast(SCREAMING_SNAKE_CASE_ , tf.floataa ) __lowerCamelCase : Optional[int] = tf.cast(self.warmup_steps , tf.floataa ) __lowerCamelCase : List[Any] = global_step_float / warmup_steps_float __lowerCamelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(SCREAMING_SNAKE_CASE_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> Optional[Any]: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 0.9 , UpperCAmelCase_ : float = 0.999 , UpperCAmelCase_ : float = 1e-8 , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : Optional[List[str]] = None , ) -> int: __lowerCamelCase : int = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCAmelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase_ , ) if num_warmup_steps: __lowerCamelCase : str = WarmUp( initial_learning_rate=UpperCAmelCase_ , decay_schedule_fn=UpperCAmelCase_ , warmup_steps=UpperCAmelCase_ , ) if weight_decay_rate > 0.0: __lowerCamelCase : List[Any] = AdamWeightDecay( learning_rate=UpperCAmelCase_ , weight_decay_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=UpperCAmelCase_ , ) else: __lowerCamelCase : Tuple = tf.keras.optimizers.Adam( learning_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ = 0.0_0_1 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.9_9_9 , SCREAMING_SNAKE_CASE_ = 1E-7 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "AdamWeightDecay" , **SCREAMING_SNAKE_CASE_ , ) -> int: super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = weight_decay_rate __lowerCamelCase : str = include_in_weight_decay __lowerCamelCase : List[Any] = exclude_from_weight_decay @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase : Any = {'WarmUp': WarmUp} return super(SCREAMING_SNAKE_CASE_ , cls ).from_config(SCREAMING_SNAKE_CASE_ , custom_objects=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: super(SCREAMING_SNAKE_CASE_ , self )._prepare_local(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Tuple = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = list(zip(*SCREAMING_SNAKE_CASE_ ) ) return super(SCREAMING_SNAKE_CASE_ , self ).apply_gradients(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , name=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCamelCase : Optional[int] = apply_state or {} __lowerCamelCase : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCamelCase : List[Any] = self._fallback_apply_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: __lowerCamelCase , __lowerCamelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_dense(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tf.control_dependencies([decay] ): return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_sparse(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Any = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None: return False return True class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self ) -> Tuple: __lowerCamelCase : Tuple = [] __lowerCamelCase : Optional[Any] = None @property def lowercase_ ( self ) -> List[str]: if self._accum_steps is None: __lowerCamelCase : Tuple = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowercase_ ( self ) -> List[str]: if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str: if not self._gradients: __lowerCamelCase : List[str] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(SCREAMING_SNAKE_CASE_ ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(SCREAMING_SNAKE_CASE_ ) != len(self._gradients ): raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(SCREAMING_SNAKE_CASE_ )}' ) for accum_gradient, gradient in zip(self._gradients , SCREAMING_SNAKE_CASE_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(SCREAMING_SNAKE_CASE_ ) self._accum_steps.assign_add(1 ) def lowercase_ ( self ) -> int: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(SCREAMING_SNAKE_CASE_ ) )
13
1
'''simple docstring''' from collections.abc import Generator from math import sin def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: if len(UpperCAmelCase_ ) != 32: raise ValueError('Input must be of length 32' ) __lowerCamelCase : Dict = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bytes: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : Union[str, Any] = format(UpperCAmelCase_ , '08x' )[-8:] __lowerCamelCase : str = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = B'' for char in message: bit_string += format(UpperCAmelCase_ , '08b' ).encode('utf-8' ) __lowerCamelCase : List[str] = format(len(UpperCAmelCase_ ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCAmelCase_ ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> Generator[list[int], None, None]: if len(UpperCAmelCase_ ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(UpperCAmelCase_ ) , 5_12 ): __lowerCamelCase : Any = bit_string[pos : pos + 5_12] __lowerCamelCase : Optional[int] = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : List[Any] = format(UpperCAmelCase_ , '032b' ) __lowerCamelCase : Optional[int] = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCAmelCase_ , 2 ) def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: return (a + b) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = preprocess(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase : Dict = 0x67_45_23_01 __lowerCamelCase : Union[str, Any] = 0xef_cd_ab_89 __lowerCamelCase : Optional[Any] = 0x98_ba_dc_fe __lowerCamelCase : Union[str, Any] = 0x10_32_54_76 __lowerCamelCase : List[str] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCAmelCase_ ): __lowerCamelCase : Dict = aa __lowerCamelCase : Tuple = ba __lowerCamelCase : List[Any] = ca __lowerCamelCase : Dict = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase : List[str] = d ^ (b & (c ^ d)) __lowerCamelCase : Optional[int] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase : Optional[int] = c ^ (d & (b ^ c)) __lowerCamelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase : str = b ^ c ^ d __lowerCamelCase : Any = (3 * i + 5) % 16 else: __lowerCamelCase : Union[str, Any] = c ^ (b | not_aa(UpperCAmelCase_ )) __lowerCamelCase : int = (7 * i) % 16 __lowerCamelCase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase : Optional[Any] = d __lowerCamelCase : Tuple = c __lowerCamelCase : Optional[int] = b __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , left_rotate_aa(UpperCAmelCase_ , shift_amounts[i] ) ) # Add hashed chunk to running total __lowerCamelCase : int = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Dict = reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE_=[1, 2, 3] , ) -> Any: __lowerCamelCase : Optional[Any] = parent __lowerCamelCase : int = batch_size __lowerCamelCase : Optional[int] = image_size __lowerCamelCase : Optional[int] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : List[Any] = depths __lowerCamelCase : int = num_heads __lowerCamelCase : Optional[Any] = window_size __lowerCamelCase : Optional[Any] = mlp_ratio __lowerCamelCase : List[str] = qkv_bias __lowerCamelCase : List[str] = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate __lowerCamelCase : Any = hidden_act __lowerCamelCase : Union[str, Any] = use_absolute_embeddings __lowerCamelCase : Any = patch_norm __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : Dict = is_training __lowerCamelCase : Optional[Any] = scope __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[str] = type_sequence_label_size __lowerCamelCase : Dict = encoder_stride __lowerCamelCase : Union[str, Any] = out_features __lowerCamelCase : str = out_indices def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[str] = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Optional[int]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Dict = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __lowerCamelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : str = ['stem'] __lowerCamelCase : Optional[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase : int = False lowerCamelCase : int = False lowerCamelCase : str = False lowerCamelCase : int = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self ) __lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def lowercase_ ( self ) -> int: pass def lowercase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self ) -> Tuple: return def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[str]: __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ ) @unittest.skip('Swin does not use inputs_embeds' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('Swin does not support feedforward chunking' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : str = [*signature.parameters.keys()] __lowerCamelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def lowercase_ ( self ) -> List[Any]: pass def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : int = outputs.hidden_states __lowerCamelCase : Tuple = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # Swin has a different seq_length __lowerCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __lowerCamelCase : Dict = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Optional[int] = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __lowerCamelCase : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __lowerCamelCase : str = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Tuple = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = 0 return t def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ): with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple() def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:' f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has' f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.' ) , ) recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for model_class in self.all_model_classes: __lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) __lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ (unittest.TestCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase : List[str] = MaskFormerSwinConfig def lowercase_ ( self ) -> Tuple: __lowerCamelCase : List[str] = MaskFormerSwinModelTester(self ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ ) backbone.to(SCREAMING_SNAKE_CASE_ ) backbone.eval() __lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.attentions )
13
1
'''simple docstring''' import math import os import sys def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> str: __lowerCamelCase : List[str] = '' try: with open(UpperCAmelCase_ , 'rb' ) as binary_file: __lowerCamelCase : Union[str, Any] = binary_file.read() for dat in data: __lowerCamelCase : Union[str, Any] = F'{dat:08b}' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def UpperCAmelCase__ ( UpperCAmelCase_ : dict[str, str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) -> None: lexicon.pop(UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = last_match_id if math.loga(UpperCAmelCase_ ).is_integer(): for curr_key in lexicon: __lowerCamelCase : Dict = '0' + lexicon[curr_key] __lowerCamelCase : Optional[Any] = bin(UpperCAmelCase_ )[2:] def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> str: __lowerCamelCase : List[str] = {'0': '0', '1': '1'} __lowerCamelCase , __lowerCamelCase : Dict = '', '' __lowerCamelCase : Optional[int] = len(UpperCAmelCase_ ) for i in range(len(UpperCAmelCase_ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __lowerCamelCase : Optional[Any] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) index += 1 __lowerCamelCase : List[str] = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": __lowerCamelCase : Any = lexicon[curr_string] result += last_match_id return result def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> str: __lowerCamelCase : Union[str, Any] = os.path.getsize(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = bin(UpperCAmelCase_ )[2:] __lowerCamelCase : str = len(UpperCAmelCase_ ) return "0" * (length_length - 1) + file_length_binary + compressed def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> None: __lowerCamelCase : Tuple = 8 try: with open(UpperCAmelCase_ , 'wb' ) as opened_file: __lowerCamelCase : Optional[Any] = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(UpperCAmelCase_ , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> None: __lowerCamelCase : List[Any] = read_file_binary(UpperCAmelCase_ ) __lowerCamelCase : Dict = compress_data(UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = add_file_length(UpperCAmelCase_ , UpperCAmelCase_ ) write_file_binary(UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
13
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers A__ : Dict = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None ) -> List[Any]: require_version(deps[pkg] , UpperCAmelCase_ )
13
1
'''simple docstring''' from math import asin, atan, cos, radians, sin, sqrt, tan A__ : List[Any] = 6_3_7_8_1_3_7.0 A__ : Any = 6_3_5_6_7_5_2.3_1_4_2_4_5 A__ : Optional[Any] = 6378137 def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> float: __lowerCamelCase : Tuple = (AXIS_A - AXIS_B) / AXIS_A __lowerCamelCase : str = atan((1 - flattening) * tan(radians(UpperCAmelCase_ ) ) ) __lowerCamelCase : Union[str, Any] = atan((1 - flattening) * tan(radians(UpperCAmelCase_ ) ) ) __lowerCamelCase : Union[str, Any] = radians(UpperCAmelCase_ ) __lowerCamelCase : str = radians(UpperCAmelCase_ ) # Equation __lowerCamelCase : Union[str, Any] = sin((phi_a - phi_a) / 2 ) __lowerCamelCase : int = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda __lowerCamelCase : Any = sqrt(sin_sq_phi + (cos(UpperCAmelCase_ ) * cos(UpperCAmelCase_ ) * sin_sq_lambda) ) return 2 * RADIUS * asin(UpperCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
13
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys A__ : List[str] = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
13
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = ['image_processor', 'tokenizer'] lowerCamelCase : Union[str, Any] = 'CLIPImageProcessor' lowerCamelCase : Union[str, Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Dict = kwargs.pop('feature_extractor' ) __lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Dict: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: __lowerCamelCase : Dict = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if images is not None: __lowerCamelCase : List[str] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is not None and images is not None: __lowerCamelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[str] = self.tokenizer.model_input_names __lowerCamelCase : List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowercase_ ( self ) -> Dict: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor_class @property def lowercase_ ( self ) -> List[str]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor
13
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""") def UpperCAmelCase__ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) ) A__ : str = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
13
1
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def lowercase_ ( self ) -> int: __lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() __lowerCamelCase : List[str] = 8 # DPR tok __lowerCamelCase : List[str] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __lowerCamelCase : Dict = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __lowerCamelCase : List[str] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __lowerCamelCase : Dict = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) __lowerCamelCase : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __lowerCamelCase : List[Any] = {'unk_token': '<unk>'} __lowerCamelCase : int = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) ) def lowercase_ ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def lowercase_ ( self ) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def lowercase_ ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def lowercase_ ( self ) -> Dict: shutil.rmtree(self.tmpdirname ) def lowercase_ ( self ) -> int: __lowerCamelCase : int = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[Any] = self.get_dummy_dataset() __lowerCamelCase : Any = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __lowerCamelCase : int = dataset __lowerCamelCase : str = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : int = self.get_dummy_dataset() __lowerCamelCase : str = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , ) if from_disk: __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , 'dataset' ) __lowerCamelCase : Any = os.path.join(self.tmpdirname , 'index.faiss' ) dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) ) dataset.drop_index('embeddings' ) dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) ) del dataset __lowerCamelCase : int = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: __lowerCamelCase : Optional[Any] = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , ) return retriever def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[int] = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) __lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' ) dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' ) pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) ) __lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' ) __lowerCamelCase : int = {sample['id']: [sample['text'], sample['title']] for sample in dataset} pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , 'wb' ) ) __lowerCamelCase : Optional[int] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , ) __lowerCamelCase : Optional[Any] = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def lowercase_ ( self ) -> Dict: __lowerCamelCase : Optional[Any] = 1 __lowerCamelCase : Optional[int] = self.get_dummy_canonical_hf_index_retriever() __lowerCamelCase : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowercase_ ( self ) -> str: __lowerCamelCase : str = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __lowerCamelCase : Tuple = self.get_dummy_dataset() retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase : int = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def lowercase_ ( self ) -> int: __lowerCamelCase : str = 1 __lowerCamelCase : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowercase_ ( self ) -> Dict: __lowerCamelCase : int = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def lowercase_ ( self ) -> int: __lowerCamelCase : str = 1 __lowerCamelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase : Tuple = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = 1 __lowerCamelCase : Tuple = self.get_dummy_legacy_index_retriever() __lowerCamelCase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] ) self.assertEqual(len(doc_dicts[0]['text'] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Union[str, Any] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def lowercase_ ( self ) -> List[Any]: import torch __lowerCamelCase : Union[str, Any] = 1 __lowerCamelCase : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever() __lowerCamelCase : int = [[5, 7], [10, 11]] __lowerCamelCase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase : Optional[int] = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = ( out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) __lowerCamelCase : Dict = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = ( # noqa: F841 out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], out['doc_ids'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def lowercase_ ( self ) -> str: __lowerCamelCase : Dict = self.get_dpr_ctx_encoder_tokenizer() __lowerCamelCase : str = 1 __lowerCamelCase : int = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = [[5, 7], [10, 11]] __lowerCamelCase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowerCamelCase : str = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual( len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary.
13
'''simple docstring''' from __future__ import annotations import os from collections.abc import Mapping A__ : Optional[Any] = tuple[int, int] class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: __lowerCamelCase : set[int] = vertices __lowerCamelCase : dict[EdgeT, int] = { (min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )): weight for edge, weight in edges.items() } def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __lowerCamelCase : Union[str, Any] = weight def lowercase_ ( self ) -> Graph: __lowerCamelCase : Graph = Graph({min(self.vertices )} , {} ) __lowerCamelCase : EdgeT __lowerCamelCase : int __lowerCamelCase : EdgeT __lowerCamelCase : int while len(subgraph.vertices ) < len(self.vertices ): __lowerCamelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __lowerCamelCase : Optional[int] = edge __lowerCamelCase : List[str] = weight subgraph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return subgraph def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int: __lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) ) __lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : dict[EdgeT, int] = {} __lowerCamelCase : list[str] __lowerCamelCase : int __lowerCamelCase : int with open(UpperCAmelCase_ ) as f: __lowerCamelCase : Any = f.read().strip().split('\n' ) __lowerCamelCase : Any = [line.split(',' ) for line in data] for edgea in range(1 , len(UpperCAmelCase_ ) ): for edgea in range(UpperCAmelCase_ ): if adjaceny_matrix[edgea][edgea] != "-": __lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] ) __lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ ) __lowerCamelCase : Graph = graph.prims_algorithm() __lowerCamelCase : int = sum(graph.edges.values() ) __lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' from decimal import Decimal, getcontext from math import ceil, factorial def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> str: if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise TypeError('Undefined for non-integers' ) elif precision < 1: raise ValueError('Undefined for non-natural numbers' ) __lowerCamelCase : List[Any] = precision __lowerCamelCase : Tuple = ceil(precision / 14 ) __lowerCamelCase : Union[str, Any] = 42_68_80 * Decimal(1_00_05 ).sqrt() __lowerCamelCase : List[str] = 1 __lowerCamelCase : Dict = 13_59_14_09 __lowerCamelCase : Union[str, Any] = Decimal(UpperCAmelCase_ ) for k in range(1 , UpperCAmelCase_ ): __lowerCamelCase : List[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(UpperCAmelCase_ ) ** 3) linear_term += 5_45_14_01_34 exponential_term *= -26_25_37_41_26_40_76_80_00 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": A__ : Any = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
13
'''simple docstring''' from collections.abc import Generator from math import sin def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: if len(UpperCAmelCase_ ) != 32: raise ValueError('Input must be of length 32' ) __lowerCamelCase : Dict = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> bytes: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : Union[str, Any] = format(UpperCAmelCase_ , '08x' )[-8:] __lowerCamelCase : str = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = B'' for char in message: bit_string += format(UpperCAmelCase_ , '08b' ).encode('utf-8' ) __lowerCamelCase : List[str] = format(len(UpperCAmelCase_ ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCAmelCase_ ) % 5_12 != 4_48: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> Generator[list[int], None, None]: if len(UpperCAmelCase_ ) % 5_12 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(UpperCAmelCase_ ) , 5_12 ): __lowerCamelCase : Any = bit_string[pos : pos + 5_12] __lowerCamelCase : Optional[int] = [] for i in range(0 , 5_12 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) __lowerCamelCase : List[Any] = format(UpperCAmelCase_ , '032b' ) __lowerCamelCase : Optional[int] = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCAmelCase_ , 2 ) def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: return (a + b) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes: __lowerCamelCase : Optional[Any] = preprocess(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase : Dict = 0x67_45_23_01 __lowerCamelCase : Union[str, Any] = 0xef_cd_ab_89 __lowerCamelCase : Optional[Any] = 0x98_ba_dc_fe __lowerCamelCase : Union[str, Any] = 0x10_32_54_76 __lowerCamelCase : List[str] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCAmelCase_ ): __lowerCamelCase : Dict = aa __lowerCamelCase : Tuple = ba __lowerCamelCase : List[Any] = ca __lowerCamelCase : Dict = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase : List[str] = d ^ (b & (c ^ d)) __lowerCamelCase : Optional[int] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase : Optional[int] = c ^ (d & (b ^ c)) __lowerCamelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase : str = b ^ c ^ d __lowerCamelCase : Any = (3 * i + 5) % 16 else: __lowerCamelCase : Union[str, Any] = c ^ (b | not_aa(UpperCAmelCase_ )) __lowerCamelCase : int = (7 * i) % 16 __lowerCamelCase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase : Optional[Any] = d __lowerCamelCase : Tuple = c __lowerCamelCase : Optional[int] = b __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , left_rotate_aa(UpperCAmelCase_ , shift_amounts[i] ) ) # Add hashed chunk to running total __lowerCamelCase : int = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[str] = sum_aa(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Dict = reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def UpperCAmelCase__ ( ) -> List[Any]: __lowerCamelCase : List[Any] = ArgumentParser( description=( 'PyTorch TPU distributed training launch ' 'helper utility that will spawn up ' 'multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=UpperCAmelCase_ , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=UpperCAmelCase_ , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=UpperCAmelCase_ ) return parser.parse_args() def UpperCAmelCase__ ( ) -> Optional[Any]: __lowerCamelCase : str = parse_args() # Import training_script as a module. __lowerCamelCase : Optional[int] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __lowerCamelCase : Optional[Any] = script_fpath.stem __lowerCamelCase : Dict = importlib.import_module(UpperCAmelCase_ ) # Patch sys.argv __lowerCamelCase : Tuple = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
13
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Dict = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[Any] = 'rwkv' lowerCamelCase : Any = {'max_position_embeddings': 'context_length'} def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : Tuple = context_length __lowerCamelCase : str = hidden_size __lowerCamelCase : List[str] = num_hidden_layers __lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowerCamelCase : Optional[Any] = layer_norm_epsilon __lowerCamelCase : int = rescale_every __lowerCamelCase : Tuple = use_cache __lowerCamelCase : int = bos_token_id __lowerCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' import random from .binary_exp_mod import bin_exp_mod def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=10_00 ) -> Union[str, Any]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd __lowerCamelCase : Any = n - 1 __lowerCamelCase : Tuple = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) __lowerCamelCase : Tuple = 0 while count < prec: __lowerCamelCase : List[str] = random.randint(2 , n - 1 ) __lowerCamelCase : Tuple = bin_exp_mod(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if b != 1: __lowerCamelCase : Optional[Any] = True for _ in range(UpperCAmelCase_ ): if b == n - 1: __lowerCamelCase : Any = False break __lowerCamelCase : List[str] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": A__ : Tuple = abs(int(input("""Enter bound : """).strip())) print("""Here's the list of primes:""") print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 10_00 ) -> int: __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[int] = 'microsoft/speecht5_tts' lowerCamelCase : List[str] = ( 'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ' 'text to read (in English) and returns a waveform object containing the sound.' ) lowerCamelCase : Tuple = 'text_reader' lowerCamelCase : Tuple = SpeechTaProcessor lowerCamelCase : Any = SpeechTaForTextToSpeech lowerCamelCase : List[str] = SpeechTaHifiGan lowerCamelCase : Union[str, Any] = ['text'] lowerCamelCase : List[Any] = ['audio'] def lowercase_ ( self ) -> Dict: if self.post_processor is None: __lowerCamelCase : str = 'microsoft/speecht5_hifigan' super().setup() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int: __lowerCamelCase : str = self.pre_processor(text=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , truncation=SCREAMING_SNAKE_CASE_ ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' ) __lowerCamelCase : List[str] = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' ) __lowerCamelCase : List[str] = torch.tensor(embeddings_dataset[73_05]['xvector'] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str: with torch.no_grad(): return self.model.generate_speech(**SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str: with torch.no_grad(): return self.post_processor(SCREAMING_SNAKE_CASE_ ).cpu().detach()
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Dict = XGLMConfig lowerCamelCase : List[str] = {} lowerCamelCase : Union[str, Any] = 'gelu' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Any: __lowerCamelCase : int = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : Optional[Any] = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : str = use_input_mask __lowerCamelCase : Dict = use_labels __lowerCamelCase : Union[str, Any] = vocab_size __lowerCamelCase : List[Any] = d_model __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : Optional[Any] = ffn_dim __lowerCamelCase : List[Any] = activation_function __lowerCamelCase : List[Any] = activation_dropout __lowerCamelCase : List[Any] = attention_dropout __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : int = None __lowerCamelCase : int = 0 __lowerCamelCase : Tuple = 2 __lowerCamelCase : Tuple = 1 def lowercase_ ( self ) -> Any: return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __lowerCamelCase : Optional[int] = None if self.use_input_mask: __lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase : str = self.get_config() __lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase_ ( self ) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , ) def lowercase_ ( self ) -> str: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : str = config_and_inputs __lowerCamelCase : Union[str, Any] = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase : Any = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase : List[Any] = False lowerCamelCase : Dict = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : str = TFXGLMModelTester(self ) __lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 ) def lowercase_ ( self ) -> Dict: self.config_tester.run_common_tests() @slow def lowercase_ ( self ) -> Optional[int]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Optional[Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def lowercase_ ( self ) -> Any: super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self , SCREAMING_SNAKE_CASE_=True ) -> List[str]: __lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[int] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on __lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __lowerCamelCase : List[Any] = tokenizer('Today is a nice day and' , return_tensors='tf' ) __lowerCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __lowerCamelCase : Tuple = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , seed=[7, 0] ) __lowerCamelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def lowercase_ ( self ) -> int: __lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Any = 'left' # use different length sentences to test batching __lowerCamelCase : Any = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : Any = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) __lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
13
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging A__ : Dict = logging.get_logger(__name__) A__ : Union[str, Any] = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : str = 'gpt_neo' lowerCamelCase : str = ['past_key_values'] lowerCamelCase : Optional[int] = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , SCREAMING_SNAKE_CASE_=5_02_57 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , **SCREAMING_SNAKE_CASE_ , ) -> Any: __lowerCamelCase : List[Any] = vocab_size __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Union[str, Any] = hidden_size __lowerCamelCase : List[Any] = num_layers __lowerCamelCase : Optional[Any] = num_heads __lowerCamelCase : int = intermediate_size __lowerCamelCase : int = window_size __lowerCamelCase : Optional[Any] = activation_function __lowerCamelCase : List[str] = resid_dropout __lowerCamelCase : Any = embed_dropout __lowerCamelCase : Union[str, Any] = attention_dropout __lowerCamelCase : Union[str, Any] = classifier_dropout __lowerCamelCase : Optional[int] = layer_norm_epsilon __lowerCamelCase : int = initializer_range __lowerCamelCase : Union[str, Any] = use_cache __lowerCamelCase : Dict = bos_token_id __lowerCamelCase : List[Any] = eos_token_id __lowerCamelCase : List[str] = attention_types __lowerCamelCase : Dict = self.expand_attention_types_params(SCREAMING_SNAKE_CASE_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' f'`config.num_layers = {self.num_layers}`. ' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @staticmethod def lowercase_ ( SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Dict = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: import torch __lowerCamelCase : Optional[Any] = input.size() __lowerCamelCase : Dict = len(UpperCAmelCase_ ) __lowerCamelCase : Any = shape[dimension] __lowerCamelCase : Tuple = torch.arange(0 , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = torch.div(sizedim - size , UpperCAmelCase_ , rounding_mode='floor' ) + 1 __lowerCamelCase : int = torch.arange(UpperCAmelCase_ ) + low_indices[:min_length][:, None] __lowerCamelCase : Tuple = [slice(UpperCAmelCase_ )] * rank __lowerCamelCase : Union[str, Any] = indices __lowerCamelCase : Optional[int] = input[s] __lowerCamelCase : Optional[Any] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Dict: import torch __lowerCamelCase : Optional[Any] = torch.arange(1 , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = torch.remainder(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : List[Any] = remainders == 0 __lowerCamelCase : Dict = candidates[divisor_indices] __lowerCamelCase : Dict = torch.max(UpperCAmelCase_ ) return largest_divisor, torch.div(UpperCAmelCase_ , UpperCAmelCase_ , rounding_mode='floor' ) class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: __lowerCamelCase : List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='inputs' ) __lowerCamelCase : Optional[int] = {0: 'batch', 1: 'past_sequence + sequence'} else: __lowerCamelCase : Any = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowercase_ ( self ) -> int: return self._config.num_heads def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ) -> Mapping[str, Any]: __lowerCamelCase : List[Any] = super(SCREAMING_SNAKE_CASE_ , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) # We need to order the input in the way they appears in the forward() __lowerCamelCase : str = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowerCamelCase , __lowerCamelCase : Any = common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowerCamelCase : Optional[int] = seqlen + 2 __lowerCamelCase : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCamelCase : List[Any] = [ (torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers ) ] __lowerCamelCase : List[Any] = common_inputs['attention_mask'] if self.use_past: __lowerCamelCase : Any = ordered_inputs['attention_mask'].dtype __lowerCamelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 ) return ordered_inputs @property def lowercase_ ( self ) -> int: return 13
13
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] = logging.get_logger(__name__) # TODO Update this A__ : Tuple = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Tuple = 'esm' def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : str = num_hidden_layers __lowerCamelCase : List[str] = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Optional[int] = max_position_embeddings __lowerCamelCase : str = initializer_range __lowerCamelCase : Optional[int] = layer_norm_eps __lowerCamelCase : List[str] = position_embedding_type __lowerCamelCase : int = use_cache __lowerCamelCase : Optional[Any] = emb_layer_norm_before __lowerCamelCase : Optional[Any] = token_dropout __lowerCamelCase : str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) __lowerCamelCase : Dict = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) __lowerCamelCase : List[str] = get_default_vocab_list() else: __lowerCamelCase : Optional[Any] = vocab_list else: __lowerCamelCase : Dict = None __lowerCamelCase : Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' ) def lowercase_ ( self ) -> Any: __lowerCamelCase : Any = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : int = self.esmfold_config.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str = None lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : bool = False lowerCamelCase : float = 0 lowerCamelCase : bool = True lowerCamelCase : bool = False lowerCamelCase : int = 1_2_8 lowerCamelCase : "TrunkConfig" = None def lowercase_ ( self ) -> Any: if self.trunk is None: __lowerCamelCase : List[str] = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = TrunkConfig(**self.trunk ) def lowercase_ ( self ) -> int: __lowerCamelCase : Optional[int] = asdict(self ) __lowerCamelCase : str = self.trunk.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 4_8 lowerCamelCase : int = 1_0_2_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : int = 3_2 lowerCamelCase : float = 0 lowerCamelCase : float = 0 lowerCamelCase : bool = False lowerCamelCase : int = 4 lowerCamelCase : Optional[int] = 1_2_8 lowerCamelCase : "StructureModuleConfig" = None def lowercase_ ( self ) -> Optional[int]: if self.structure_module is None: __lowerCamelCase : Dict = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) __lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width __lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : List[str] = asdict(self ) __lowerCamelCase : int = self.structure_module.to_dict() return output @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 3_8_4 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_6 lowerCamelCase : int = 1_2_8 lowerCamelCase : int = 1_2 lowerCamelCase : int = 4 lowerCamelCase : int = 8 lowerCamelCase : float = 0.1 lowerCamelCase : int = 8 lowerCamelCase : int = 1 lowerCamelCase : int = 2 lowerCamelCase : int = 7 lowerCamelCase : int = 1_0 lowerCamelCase : float = 1e-8 lowerCamelCase : float = 1e5 def lowercase_ ( self ) -> Any: return asdict(self ) def UpperCAmelCase__ ( ) -> Optional[Any]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
13
1
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> bool: __lowerCamelCase : Dict = [int(UpperCAmelCase_ ) for i in ip_va_address.split('.' ) if i.isdigit()] return len(UpperCAmelCase_ ) == 4 and all(0 <= int(UpperCAmelCase_ ) <= 2_54 for octet in octets ) if __name__ == "__main__": A__ : Optional[Any] = input().strip() A__ : Tuple = """valid""" if is_ip_va_address_valid(ip) else """invalid""" print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
13
'''simple docstring''' A__ : dict[tuple[int, int, int], int] = {} def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on __lowerCamelCase : List[Any] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one __lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 __lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter __lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 ) __lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime __lowerCamelCase : Union[str, Any] = prizestrings return prizestrings def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int: return _calculate(UpperCAmelCase_ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ : List[str] = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ """OPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OPTForCausalLM""", """OPTModel""", """OPTPreTrainedModel""", """OPTForSequenceClassification""", """OPTForQuestionAnswering""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ """FlaxOPTForCausalLM""", """FlaxOPTModel""", """FlaxOPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys A__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : str lowerCamelCase : Optional[str] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None lowerCamelCase : Optional[Union[str, int]] = None def lowercase_ ( self ) -> List[str]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> Any: return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def lowercase_ ( self ) -> int: return self.major, self.minor, self.patch def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return Version(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return other raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' ) def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: try: __lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ ) return self.tuple < other.tuple def __hash__( self ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase_ ( self ) -> str: return self.version_str def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str: __lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict: return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
13
1
'''simple docstring''' from __future__ import annotations import typing from collections.abc import Iterable import numpy as np A__ : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 A__ : Optional[Any] = typing.Union[np.floataa, int, float] # noqa: UP007 def UpperCAmelCase__ ( UpperCAmelCase_ : Vector , UpperCAmelCase_ : Vector ) -> VectorOut: return np.sqrt(np.sum((np.asarray(UpperCAmelCase_ ) - np.asarray(UpperCAmelCase_ )) ** 2 ) ) def UpperCAmelCase__ ( UpperCAmelCase_ : Vector , UpperCAmelCase_ : Vector ) -> VectorOut: return sum((va - va) ** 2 for va, va in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) ** (1 / 2) if __name__ == "__main__": def UpperCAmelCase__ ( ) -> None: from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) ) benchmark()
13
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : Any = [] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: return self.node_position[vertex] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = pos def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase : str = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase : Optional[Any] = 2 * start + 1 else: __lowerCamelCase : int = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase : int = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase : str = temp, tempa __lowerCamelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ ) self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = position[index] while index != 0: __lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase : Union[str, Any] = heap[parent] __lowerCamelCase : Any = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Tuple = val __lowerCamelCase : List[str] = temp self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) break __lowerCamelCase : Tuple = parent else: __lowerCamelCase : Union[str, Any] = val __lowerCamelCase : Tuple = temp self.set_position(SCREAMING_SNAKE_CASE_ , 0 ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Any = positions[0] __lowerCamelCase : Union[str, Any] = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return temp def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str: __lowerCamelCase : List[Any] = Heap() __lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ ) __lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase : Tuple = [] for vertex in range(len(UpperCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase_ ) heap.node_position.append(UpperCAmelCase_ ) __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 1 __lowerCamelCase : str = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase : Any = 0 __lowerCamelCase : Any = distance heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(1 , len(UpperCAmelCase_ ) ): __lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase : Union[str, Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase_ )] ): __lowerCamelCase : Dict = distance heap.bottom_to_top( UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A__ : Tuple = int(input("""Enter number of edges: """).strip()) A__ : str = defaultdict(list) for _ in range(edges_number): A__ : Optional[int] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A__ : Dict = logging.get_logger(__name__) A__ : List[str] = { """google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""", # See all ViT models at https://huggingface.co/models?filter=vit } class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = 'vit' def __init__( self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=16 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = hidden_size __lowerCamelCase : Any = num_hidden_layers __lowerCamelCase : Union[str, Any] = num_attention_heads __lowerCamelCase : Optional[int] = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Dict = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : Any = initializer_range __lowerCamelCase : str = layer_norm_eps __lowerCamelCase : List[str] = image_size __lowerCamelCase : int = patch_size __lowerCamelCase : List[Any] = num_channels __lowerCamelCase : List[Any] = qkv_bias __lowerCamelCase : Union[str, Any] = encoder_stride class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : Optional[Any] = version.parse('1.11' ) @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowercase_ ( self ) -> float: return 1E-4
13
'''simple docstring''' def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int: __lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6 __lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'''{solution() = }''')
13
1
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A__ : Optional[int] = logging.get_logger(__name__) A__ : int = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} A__ : int = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } A__ : List[Any] = { """abeja/gpt-neox-japanese-2.7b""": 2048, } def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ) -> int: with open(UpperCAmelCase_ , 'r' , encoding='utf-8' ) as f: __lowerCamelCase : Optional[int] = json.loads(f.read() ) __lowerCamelCase : Union[str, Any] = collections.OrderedDict() __lowerCamelCase : Union[str, Any] = collections.OrderedDict() __lowerCamelCase : Optional[Any] = collections.OrderedDict() with open(UpperCAmelCase_ , 'r' , encoding='utf-8' ) as f: __lowerCamelCase : Any = f.readlines() __lowerCamelCase : Tuple = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(UpperCAmelCase_ ): __lowerCamelCase : int = b __lowerCamelCase : Any = idx for wd in b: __lowerCamelCase : Union[str, Any] = idx return vocab, raw_vocab, ids_to_tokens, emoji class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" lowerCamelCase : List[str] = VOCAB_FILES_NAMES lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Union[str, Any] = ['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_="<|startoftext|>" , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Tuple: super().__init__( unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , do_clean_text=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) if not os.path.isfile(SCREAMING_SNAKE_CASE_ ): raise ValueError( f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained' ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(SCREAMING_SNAKE_CASE_ ): raise ValueError( f'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google' ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) __lowerCamelCase : int = do_clean_text __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = load_vocab_and_emoji(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def lowercase_ ( self ) -> List[str]: # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def lowercase_ ( self ) -> Any: return dict(self.raw_vocab , **self.added_tokens_encoder ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: return self.subword_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , clean=self.do_clean_text ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: return self.vocab.get(SCREAMING_SNAKE_CASE_ , self.vocab.get(self.unk_token ) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: return self.subword_tokenizer.convert_id_to_token(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Any = ''.join(SCREAMING_SNAKE_CASE_ ).strip() return out_string def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[int]: __lowerCamelCase : int = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [self.eos_token_id] ) if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length: __lowerCamelCase : List[Any] = input_ids[-self.model_max_length :] return input_ids def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: __lowerCamelCase : List[Any] = 0 if os.path.isdir(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Tuple = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: __lowerCamelCase : List[Any] = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) __lowerCamelCase : Any = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) __lowerCamelCase : Dict = token_index writer.write(','.join(SCREAMING_SNAKE_CASE_ ) + '\n' ) index += 1 with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , SCREAMING_SNAKE_CASE_ ) return vocab_file, emoji_file class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Optional[int] = vocab # same as swe __lowerCamelCase : Dict = ids_to_tokens # same as bpe __lowerCamelCase : str = emoji __lowerCamelCase : str = np.max([len(SCREAMING_SNAKE_CASE_ ) for w in self.vocab.keys()] ) __lowerCamelCase : Union[str, Any] = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) __lowerCamelCase : Optional[Any] = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) __lowerCamelCase : List[Any] = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) __lowerCamelCase : Dict = re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) __lowerCamelCase : int = re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) __lowerCamelCase : List[str] = re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) __lowerCamelCase : Optional[int] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' __lowerCamelCase : Any = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' __lowerCamelCase : Optional[Any] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self ) -> Any: return len(self.ids_to_tokens ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Any = self.content_repattera.sub('<URL>' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = self.content_repattera.sub('<EMAIL>' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.content_repattera.sub('<TEL>' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self.content_repattera.sub('<DATE>' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.content_repattera.sub('<DATE>' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self.content_repattera.sub('<PRICE>' , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowerCamelCase : Optional[int] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Tuple: __lowerCamelCase : Tuple = text.replace(' ' , '<SP>' ) __lowerCamelCase : Optional[Any] = text.replace(' ' , '<SP>' ) __lowerCamelCase : str = text.replace('\r\n' , '<BR>' ) __lowerCamelCase : List[Any] = text.replace('\n' , '<BR>' ) __lowerCamelCase : Tuple = text.replace('\r' , '<BR>' ) __lowerCamelCase : int = text.replace('\t' , '<TAB>' ) __lowerCamelCase : Optional[int] = text.replace('—' , 'ー' ) __lowerCamelCase : List[Any] = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: __lowerCamelCase : Tuple = text.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if clean: __lowerCamelCase : Union[str, Any] = self.clean_text(SCREAMING_SNAKE_CASE_ ) def check_simbol(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : List[Any] = x.encode() if len(SCREAMING_SNAKE_CASE_ ) == 1 and len(SCREAMING_SNAKE_CASE_ ) == 2: __lowerCamelCase : Dict = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc2_a1 and c <= 0Xc2_bf) or (c >= 0Xc7_80 and c <= 0Xc7_83) or (c >= 0Xca_b9 and c <= 0Xcb_bf) or (c >= 0Xcc_80 and c <= 0Xcd_a2) ): return True return False def checkuae(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : str = x.encode() if len(SCREAMING_SNAKE_CASE_ ) == 1 and len(SCREAMING_SNAKE_CASE_ ) == 3: __lowerCamelCase : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f: return True return False __lowerCamelCase : Any = 0 __lowerCamelCase : Union[str, Any] = [] while pos < len(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Dict = min(len(SCREAMING_SNAKE_CASE_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 __lowerCamelCase : Dict = [] # (token_id, token, pos) for e in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , -1 ): __lowerCamelCase : List[str] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(SCREAMING_SNAKE_CASE_ ) > 2: __lowerCamelCase : str = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: # the smallest token_id is adopted __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )[0] result.append(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = e else: __lowerCamelCase : List[Any] = pos + 1 __lowerCamelCase : int = text[pos:end] if check_simbol(SCREAMING_SNAKE_CASE_ ): result.append('<KIGOU>' ) elif checkuae(SCREAMING_SNAKE_CASE_ ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) __lowerCamelCase : str = end return result def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="\n" ) -> Union[str, Any]: __lowerCamelCase : Dict = [] __lowerCamelCase : int = [] __lowerCamelCase : Optional[Any] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(SCREAMING_SNAKE_CASE_ ) > 0: words.append(bytearray(SCREAMING_SNAKE_CASE_ ).decode('utf-8' , errors='replace' ) ) __lowerCamelCase : Union[str, Any] = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(SCREAMING_SNAKE_CASE_ ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: words.append(bytearray(SCREAMING_SNAKE_CASE_ ).decode('utf-8' , errors='replace' ) ) __lowerCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ) return text
13
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : int = image_size __lowerCamelCase : List[str] = patch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : Any = is_training __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : str = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 1 def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Optional[int] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (self.image_size, self.image_size) __lowerCamelCase : str = (self.patch_size, self.patch_size) __lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = self.type_sequence_label_size __lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowercase_ ( self ) -> None: __lowerCamelCase : str = FlaxViTModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[str] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule A__ : str = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys A__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' import argparse A__ : Optional[Any] = """docs/source/_static/js/custom.js""" def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int: with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f: __lowerCamelCase : Dict = f.readlines() __lowerCamelCase : Tuple = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 __lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += F' "v{version}": "v{version}",\n' with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase_ ) if __name__ == "__main__": A__ : str = argparse.ArgumentParser() parser.add_argument("""--version""", help="""Release version.""") A__ : Any = parser.parse_args() update_custom_js(args.version)
13
1
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version A__ : Union[str, Any] = get_logger(__name__) class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : int = 'dummy_data' lowerCamelCase : Tuple = 'datasets' lowerCamelCase : Union[str, Any] = False def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , ) -> str: __lowerCamelCase : Optional[Any] = 0 __lowerCamelCase : List[str] = dataset_name __lowerCamelCase : Any = cache_dir __lowerCamelCase : List[str] = use_local_dummy_data __lowerCamelCase : Any = config # download_callbacks take a single url as input __lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __lowerCamelCase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __lowerCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ ) # to be downloaded __lowerCamelCase : int = None __lowerCamelCase : List[Any] = None @property def lowercase_ ( self ) -> Union[str, Any]: if self._dummy_file is None: __lowerCamelCase : Dict = self.download_dummy_data() return self._dummy_file @property def lowercase_ ( self ) -> int: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def lowercase_ ( self ) -> Any: return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def lowercase_ ( self ) -> Any: __lowerCamelCase : int = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __lowerCamelCase : Union[str, Any] = cached_path( SCREAMING_SNAKE_CASE_ , cache_dir=self.cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ , force_extract=SCREAMING_SNAKE_CASE_ ) return os.path.join(SCREAMING_SNAKE_CASE_ , self.dummy_file_name ) @property def lowercase_ ( self ) -> List[Any]: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowercase_ ( self ) -> Optional[Any]: if self._bucket_url is None: __lowerCamelCase : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def lowercase_ ( self ) -> str: # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) -> List[str]: if self.load_existing_dummy_data: # dummy data is downloaded and tested __lowerCamelCase : Optional[Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __lowerCamelCase : Any = self.dummy_file_name # special case when data_url is a dict if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return self.create_dummy_data_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): return self.create_dummy_data_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: return self.create_dummy_data_single(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) -> Any: return self.download_and_extract(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.download_and_extract(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]: return path def lowercase_ ( self ) -> List[str]: return {} def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Optional[Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for single_url in single_urls: download_callback(SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Union[str, Any] = single_urls download_callback(SCREAMING_SNAKE_CASE_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : int = [os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE_ ).name ) ) for x in single_urls] else: __lowerCamelCase : str = single_urls __lowerCamelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE_ ).name ) ) __lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __lowerCamelCase : int = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Tuple = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __lowerCamelCase : Optional[Any] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , SCREAMING_SNAKE_CASE_ ) ) for url in data_url ) __lowerCamelCase : Dict = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __lowerCamelCase : Dict = [data_url[0]] * len(SCREAMING_SNAKE_CASE_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(SCREAMING_SNAKE_CASE_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __lowerCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(SCREAMING_SNAKE_CASE_ ) return dummy_data_list def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: for download_callback in self.download_callbacks: download_callback(SCREAMING_SNAKE_CASE_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(SCREAMING_SNAKE_CASE_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> Tuple: pass def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Tuple: def _iter_archive_members(SCREAMING_SNAKE_CASE_ ): # this preserves the order of the members inside the ZIP archive __lowerCamelCase : List[Any] = Path(self.dummy_file ).parent __lowerCamelCase : str = path.relative_to(SCREAMING_SNAKE_CASE_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __lowerCamelCase : Tuple = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = Path(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = _iter_archive_members(SCREAMING_SNAKE_CASE_ ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(SCREAMING_SNAKE_CASE_ ).as_posix(), file_path.open('rb' ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(SCREAMING_SNAKE_CASE_ ): if os.path.basename(SCREAMING_SNAKE_CASE_ ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(SCREAMING_SNAKE_CASE_ ): if os.path.basename(SCREAMING_SNAKE_CASE_ ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(SCREAMING_SNAKE_CASE_ ): if filename.startswith(('.', '__') ): continue yield os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape __lowerCamelCase : Dict = jax.image.resize( SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) __lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> List[str]: __lowerCamelCase : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) __lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ ) return hidden_states class UpperCAmelCase_ (nn.Module ): """simple docstring""" lowerCamelCase : int lowerCamelCase : int = None lowerCamelCase : float = 0.0 lowerCamelCase : bool = None lowerCamelCase : jnp.dtype = jnp.floataa def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels __lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : Tuple = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype ) __lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCamelCase : int = nn.Dropout(self.dropout_prob ) __lowerCamelCase : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __lowerCamelCase : List[Any] = None if use_nin_shortcut: __lowerCamelCase : Any = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple: __lowerCamelCase : List[Any] = hidden_states __lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 ) __lowerCamelCase : Optional[int] = hidden_states + temb __lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ ) if self.conv_shortcut is not None: __lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ ) return hidden_states + residual
13
1
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput A__ : int = 8 def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int=BITS ) -> int: __lowerCamelCase : List[Any] = x.device __lowerCamelCase : Union[str, Any] = (x * 2_55).int().clamp(0 , 2_55 ) __lowerCamelCase : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCAmelCase_ ) __lowerCamelCase : List[str] = rearrange(UpperCAmelCase_ , 'd -> d 1 1' ) __lowerCamelCase : Optional[int] = rearrange(UpperCAmelCase_ , 'b c h w -> b c 1 h w' ) __lowerCamelCase : int = ((x & mask) != 0).float() __lowerCamelCase : List[Any] = rearrange(UpperCAmelCase_ , 'b c d h w -> b (c d) h w' ) __lowerCamelCase : str = bits * 2 - 1 return bits def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=BITS ) -> Tuple: __lowerCamelCase : int = x.device __lowerCamelCase : Tuple = (x > 0).int() __lowerCamelCase : Optional[int] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCAmelCase_ , dtype=torch.intaa ) __lowerCamelCase : Tuple = rearrange(UpperCAmelCase_ , 'd -> d 1 1' ) __lowerCamelCase : List[Any] = rearrange(UpperCAmelCase_ , 'b (c d) h w -> b c d h w' , d=8 ) __lowerCamelCase : Dict = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 2_55).clamp(0.0 , 1.0 ) def UpperCAmelCase__ ( self : str , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : int , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) __lowerCamelCase : Tuple = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas __lowerCamelCase : Any = self.alphas_cumprod[timestep] __lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod __lowerCamelCase : Dict = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowerCamelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" __lowerCamelCase : Any = self.bit_scale if self.config.clip_sample: __lowerCamelCase : Tuple = torch.clamp(UpperCAmelCase_ , -scale , UpperCAmelCase_ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) __lowerCamelCase : int = self._get_variance(UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : Any = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide __lowerCamelCase : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowerCamelCase : List[Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowerCamelCase : List[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 __lowerCamelCase : int = model_output.device if torch.is_tensor(UpperCAmelCase_ ) else 'cpu' __lowerCamelCase : List[Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase_ ).to(UpperCAmelCase_ ) __lowerCamelCase : int = self._get_variance(UpperCAmelCase_ , UpperCAmelCase_ ) ** 0.5 * eta * noise __lowerCamelCase : Tuple = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ ) def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : int , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : int="epsilon" , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]: __lowerCamelCase : Tuple = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: __lowerCamelCase , __lowerCamelCase : int = torch.split(UpperCAmelCase_ , sample.shape[1] , dim=1 ) else: __lowerCamelCase : Union[str, Any] = None # 1. compute alphas, betas __lowerCamelCase : Tuple = self.alphas_cumprod[t] __lowerCamelCase : Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one __lowerCamelCase : Optional[Any] = 1 - alpha_prod_t __lowerCamelCase : int = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": __lowerCamelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": __lowerCamelCase : Union[str, Any] = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" __lowerCamelCase : List[Any] = self.bit_scale if self.config.clip_sample: __lowerCamelCase : List[Any] = torch.clamp(UpperCAmelCase_ , -scale , UpperCAmelCase_ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t __lowerCamelCase : Dict = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise __lowerCamelCase : Any = 0 if t > 0: __lowerCamelCase : str = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCAmelCase_ ).to(model_output.device ) __lowerCamelCase : str = (self._get_variance(UpperCAmelCase_ , predicted_variance=UpperCAmelCase_ ) ** 0.5) * noise __lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ ) class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , ) -> Dict: super().__init__() __lowerCamelCase : Tuple = bit_scale __lowerCamelCase : str = ( ddim_bit_scheduler_step if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> Union[Tuple, ImagePipelineOutput]: __lowerCamelCase : Optional[Any] = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Optional[Any] = decimal_to_bits(SCREAMING_SNAKE_CASE_ ) * self.bit_scale __lowerCamelCase : Optional[int] = latents.to(self.device ) self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual __lowerCamelCase : List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase : Tuple = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample __lowerCamelCase : Dict = bits_to_decimal(SCREAMING_SNAKE_CASE_ ) if output_type == "pil": __lowerCamelCase : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' from __future__ import annotations A__ : int = 10 def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> list[int]: __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Any = max(UpperCAmelCase_ ) while placement <= max_digit: # declare and initialize empty buckets __lowerCamelCase : list[list] = [[] for _ in range(UpperCAmelCase_ )] # split list_of_ints between the buckets for i in list_of_ints: __lowerCamelCase : List[Any] = int((i / placement) % RADIX ) buckets[tmp].append(UpperCAmelCase_ ) # put each buckets' contents into list_of_ints __lowerCamelCase : Tuple = 0 for b in range(UpperCAmelCase_ ): for i in buckets[b]: __lowerCamelCase : List[Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
13
1