code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCAmelCase_ ( ): __magic_name__ : List[str] ={ """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""], """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""], """content""": ["""a """ * 20, """a """ * 30, """b """ * 7], } __magic_name__ : List[Any] =Dataset.from_dict(lowerCamelCase ) return dataset class __A ( UpperCamelCase__ ): def A__ ( self :List[str] ): '''simple docstring''' __magic_name__ : List[Any] =get_dataset() __magic_name__ : List[str] =make_duplicate_clusters(__snake_case , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def A__ ( self :Dict ): '''simple docstring''' __magic_name__ : Dict =get_dataset() __magic_name__ , __magic_name__ : List[str] =deduplicate_dataset(__snake_case ) self.assertEqual(len(__snake_case ) , 2 ) print(__snake_case ) self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 ) self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , __snake_case )
21
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''megatron-bert''' def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache
4
0
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class A ( unittest.TestCase ): def __lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" _a = tempfile.mkdtemp() _a = BlipImageProcessor() _a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) _a = BlipaProcessor(lowerCAmelCase_ , lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : int , **lowerCAmelCase_ : Any ) -> Any: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer def __lowerCAmelCase ( self : Tuple , **lowerCAmelCase_ : Any ) -> Any: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self : str ) -> int: """simple docstring""" _a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _a = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" _a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 ) _a = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = self.get_image_processor() _a = self.get_tokenizer() _a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) _a = self.prepare_image_inputs() _a = image_processor(lowerCAmelCase_ , return_tensors='''np''' ) _a = processor(images=lowerCAmelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _a = self.get_image_processor() _a = self.get_tokenizer() _a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) _a = '''lower newer''' _a = processor(text=lowerCAmelCase_ ) _a = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" _a = self.get_image_processor() _a = self.get_tokenizer() _a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) _a = '''lower newer''' _a = self.prepare_image_inputs() _a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase_ ): processor() def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" _a = self.get_image_processor() _a = self.get_tokenizer() _a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) _a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a = processor.batch_decode(lowerCAmelCase_ ) _a = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _a = self.get_image_processor() _a = self.get_tokenizer() _a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) _a = '''lower newer''' _a = self.prepare_image_inputs() _a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
22
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
4
0
import sys import turtle def _snake_case (__lowercase , __lowercase): return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ): my_pen.up() my_pen.goto(vertexa[0] , vertexa[1]) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1]) my_pen.goto(vertexa[0] , vertexa[1]) my_pen.goto(vertexa[0] , vertexa[1]) if depth == 0: return triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1) triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1) triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( """Correct format for using this script: """ """python fractals.py <int:depth_for_fractal>""" ) snake_case__ : Tuple = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("""red""") snake_case__ : Optional[Any] = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
23
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a ( a__ ): snake_case__ = 42 class a ( a__ , a__ ): @register_to_config def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder lowerCAmelCase = Encoder( in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , ) lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case ) lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) # pass init params to Decoder lowerCAmelCase = Decoder( in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = self.encoder(_snake_case ) lowerCAmelCase = self.quant_conv(_snake_case ) if not return_dict: return (h,) return VQEncoderOutput(latents=_snake_case ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ): """simple docstring""" if not force_not_quantize: lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case ) else: lowerCAmelCase = h lowerCAmelCase = self.post_quant_conv(_snake_case ) lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = sample lowerCAmelCase = self.encode(_snake_case ).latents lowerCAmelCase = self.decode(_snake_case ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case )
4
0
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCAmelCase ( unittest.TestCase): def lowerCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() __snake_case = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __snake_case = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } __snake_case = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_6000, '''return_attention_mask''': False, '''do_normalize''': True, } __snake_case = tempfile.mkdtemp() __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) # load decoder from hub __snake_case = '''hf-internal-testing/ngram-beam-search-decoder''' def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' __snake_case = self.add_kwargs_tokens_map.copy() kwargs.update(__SCREAMING_SNAKE_CASE ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> int: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case = self.get_tokenizer() __snake_case = self.get_feature_extractor() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) __snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __snake_case = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''include''' ): WavaVecaProcessorWithLM( tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) __snake_case = floats_list((3, 1000) ) __snake_case = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) __snake_case = processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) __snake_case = '''This is a test string''' __snake_case = processor(text=__SCREAMING_SNAKE_CASE ) __snake_case = tokenizer(__SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=(2, 10, 16) , __SCREAMING_SNAKE_CASE=77 ) -> Tuple: '''simple docstring''' np.random.seed(__SCREAMING_SNAKE_CASE ) return np.random.rand(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) __snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __snake_case = processor.decode(__SCREAMING_SNAKE_CASE ) __snake_case = decoder.decode_beams(__SCREAMING_SNAKE_CASE )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) __snake_case = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __snake_case = processor.batch_decode(__SCREAMING_SNAKE_CASE ) else: with get_context(__SCREAMING_SNAKE_CASE ).Pool() as pool: __snake_case = processor.batch_decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = list(__SCREAMING_SNAKE_CASE ) with get_context('''fork''' ).Pool() as p: __snake_case = decoder.decode_beams_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case , __snake_case , __snake_case = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.logit_score ) self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.lm_score ) def lowerCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) __snake_case = self._get_dummy_logits() __snake_case = 15 __snake_case = -20.0 __snake_case = -4.0 __snake_case = processor.batch_decode( __SCREAMING_SNAKE_CASE , beam_width=__SCREAMING_SNAKE_CASE , beam_prune_logp=__SCREAMING_SNAKE_CASE , token_min_logp=__SCREAMING_SNAKE_CASE , ) __snake_case = decoded_processor_out.text __snake_case = list(__SCREAMING_SNAKE_CASE ) with get_context('''fork''' ).Pool() as pool: __snake_case = decoder.decode_beams_batch( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , beam_width=__SCREAMING_SNAKE_CASE , beam_prune_logp=__SCREAMING_SNAKE_CASE , token_min_logp=__SCREAMING_SNAKE_CASE , ) __snake_case = [d[0][0] for d in decoded_decoder_out] __snake_case = [d[0][2] for d in decoded_decoder_out] __snake_case = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __SCREAMING_SNAKE_CASE ) self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) def lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) __snake_case = self._get_dummy_logits() __snake_case = 2.0 __snake_case = 5.0 __snake_case = -20.0 __snake_case = True __snake_case = processor.batch_decode( __SCREAMING_SNAKE_CASE , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , unk_score_offset=__SCREAMING_SNAKE_CASE , lm_score_boundary=__SCREAMING_SNAKE_CASE , ) __snake_case = decoded_processor_out.text __snake_case = list(__SCREAMING_SNAKE_CASE ) decoder.reset_params( alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , unk_score_offset=__SCREAMING_SNAKE_CASE , lm_score_boundary=__SCREAMING_SNAKE_CASE , ) with get_context('''fork''' ).Pool() as pool: __snake_case = decoder.decode_beams_batch( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) __snake_case = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __SCREAMING_SNAKE_CASE ) __snake_case = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __snake_case = processor.decoder.model_container[processor.decoder._model_key] __snake_case = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __snake_case = os.listdir(__SCREAMING_SNAKE_CASE ) __snake_case = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __snake_case = WavaVecaProcessorWithLM.from_pretrained(__SCREAMING_SNAKE_CASE ) __snake_case = processor.decoder.model_container[processor.decoder._model_key] __snake_case = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __snake_case = os.listdir(__SCREAMING_SNAKE_CASE ) __snake_case = os.listdir(__SCREAMING_SNAKE_CASE ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __snake_case = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __snake_case = floats_list((3, 1000) ) __snake_case = processor_wavaveca(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) __snake_case = processor_auto(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) __snake_case = self._get_dummy_logits() __snake_case = processor_wavaveca.batch_decode(__SCREAMING_SNAKE_CASE ) __snake_case = processor_auto.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case = self.get_feature_extractor() __snake_case = self.get_tokenizer() __snake_case = self.get_decoder() __snake_case = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' __snake_case = [d[key] for d in offsets] return retrieved_list def lowerCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __snake_case = self._get_dummy_logits()[0] __snake_case = processor.decode(__SCREAMING_SNAKE_CASE , output_word_offsets=__SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def lowerCAmelCase ( self ) -> str: '''simple docstring''' __snake_case = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __snake_case = self._get_dummy_logits() __snake_case = processor.batch_decode(__SCREAMING_SNAKE_CASE , output_word_offsets=__SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' import torch __snake_case = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__SCREAMING_SNAKE_CASE ) __snake_case = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) ) __snake_case = iter(__SCREAMING_SNAKE_CASE ) __snake_case = next(__SCREAMING_SNAKE_CASE ) __snake_case = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __snake_case = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __snake_case = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __snake_case = model(__SCREAMING_SNAKE_CASE ).logits.cpu().numpy() __snake_case = processor.decode(logits[0] , output_word_offsets=__SCREAMING_SNAKE_CASE ) __snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __snake_case = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] __snake_case = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) , output.text ) # output times __snake_case = torch.tensor(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''start_time''' ) ) __snake_case = torch.tensor(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''end_time''' ) ) # fmt: off __snake_case = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) __snake_case = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=0.01 ) ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=0.01 ) )
24
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __UpperCamelCase : Optional[Any] = tuple[int, int] class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = vertices lowerCAmelCase = { (min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items() } def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCAmelCase = weight def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Graph({min(self.vertices )} , {} ) lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): lowerCAmelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCAmelCase = edge lowerCAmelCase = weight subgraph.add_edge(_snake_case , _snake_case ) return subgraph def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ): lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) ) lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = {} lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read().strip().split('\n' ) lowerCAmelCase = [line.split(',' ) for line in data] for edgea in range(1 , len(_UpperCAmelCase ) ): for edgea in range(_UpperCAmelCase ): if adjaceny_matrix[edgea][edgea] != "-": lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] ) lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase ) lowerCAmelCase = graph.prims_algorithm() lowerCAmelCase = sum(graph.edges.values() ) lowerCAmelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
4
0
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def lowerCamelCase__ ( _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ): if attention_mask is None: SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: SCREAMING_SNAKE_CASE : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_a) if decoder_head_mask is None: SCREAMING_SNAKE_CASE : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_a) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_a) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class _UpperCamelCase : '''simple docstring''' def __init__( self : List[Any] , a : Optional[Any] , a : Any=13 , a : Optional[int]=7 , a : Optional[Any]=True , a : Dict=False , a : List[str]=99 , a : Any=16 , a : Optional[int]=2 , a : Union[str, Any]=4 , a : List[Any]=4 , a : Dict="relu" , a : Any=0.1 , a : Optional[Any]=0.1 , a : str=0.0 , a : List[Any]=0.0 , a : Dict=20 , a : Optional[int]=2 , a : Optional[Any]=1 , a : Any=0 , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[Any] = batch_size SCREAMING_SNAKE_CASE : str = seq_length SCREAMING_SNAKE_CASE : str = is_training SCREAMING_SNAKE_CASE : List[Any] = use_labels SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : Dict = hidden_size SCREAMING_SNAKE_CASE : Dict = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Dict = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop SCREAMING_SNAKE_CASE : int = decoder_layerdrop SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE : List[str] = eos_token_id SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Any = self.eos_token_id # Eos Token SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input SCREAMING_SNAKE_CASE : int = input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE : str = decoder_input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config() SCREAMING_SNAKE_CASE : str = prepare_mam_aaa_inputs_dict(a , a , a ) return config, inputs_dict def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def __UpperCamelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() return config, inputs_dict def __UpperCamelCase ( self : Dict , a : Tuple , a : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = MaMaaaModel(config=a ).get_decoder().to(a ).eval() SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict["input_ids"] SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"] SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict["head_mask"] # first forward pass SCREAMING_SNAKE_CASE : Union[str, Any] = model(a , attention_mask=a , head_mask=a , use_cache=a ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and SCREAMING_SNAKE_CASE : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) SCREAMING_SNAKE_CASE : int = model(a , attention_mask=a )["last_hidden_state"] SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , past_key_values=a )[ "last_hidden_state" ] # select random slice SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1e-2 ) ) def __UpperCamelCase ( self : Any , a : Any , a : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = MaMaaaModel(config=a ).to(a ).eval() SCREAMING_SNAKE_CASE : str = model(**a ) SCREAMING_SNAKE_CASE : Dict = outputs.encoder_last_hidden_state SCREAMING_SNAKE_CASE : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Optional[Any] = model.get_encoder() encoder.save_pretrained(a ) SCREAMING_SNAKE_CASE : Dict = MaMaaaEncoder.from_pretrained(a ).to(a ) SCREAMING_SNAKE_CASE : Optional[Any] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Optional[Any] = model.get_decoder() decoder.save_pretrained(a ) SCREAMING_SNAKE_CASE : str = MaMaaaDecoder.from_pretrained(a ).to(a ) SCREAMING_SNAKE_CASE : Dict = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=a , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class _UpperCamelCase ( __A , __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowerCamelCase__ =(MaMaaaForConditionalGeneration,) if is_torch_available() else () lowerCamelCase__ =( { 'conversational': MaMaaaForConditionalGeneration, 'feature-extraction': MaMaaaModel, 'summarization': MaMaaaForConditionalGeneration, 'text2text-generation': MaMaaaForConditionalGeneration, 'translation': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =False lowerCamelCase__ =False def __UpperCamelCase ( self : List[Any] , a : Tuple , a : int , a : Optional[Any] , a : Optional[Any] , a : Tuple ) -> Dict: """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def __UpperCamelCase ( self : Tuple ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : str = MaMaaaModelTester(self ) SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=a ) def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Any ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Dict = model_class(a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a , output_loading_info=a ) self.assertEqual(info["missing_keys"] , [] ) def __UpperCamelCase ( self : Optional[int] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*a ) def __UpperCamelCase ( self : str ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*a ) def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): SCREAMING_SNAKE_CASE : List[str] = model_class(a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self._prepare_for_class(a , a ) ) if not self.is_encoder_decoder: SCREAMING_SNAKE_CASE : Optional[Any] = inputs["input_ids"] del inputs["input_ids"] else: SCREAMING_SNAKE_CASE : str = inputs["input_ids"] SCREAMING_SNAKE_CASE : Optional[Any] = inputs.get("decoder_input_ids" , a ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , a ) SCREAMING_SNAKE_CASE : Any = model.get_input_embeddings() if not self.is_encoder_decoder: SCREAMING_SNAKE_CASE : Tuple = wte(a ) else: SCREAMING_SNAKE_CASE : List[str] = wte(a ) SCREAMING_SNAKE_CASE : Dict = wte(a ) with torch.no_grad(): model(**a )[0] def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Optional[int] = input_dict["input_ids"] SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(1 ).to(a ) SCREAMING_SNAKE_CASE : str = MaMaaaForConditionalGeneration(a ).eval().to(a ) if torch_device == "cuda": model.half() model.generate(a , attention_mask=a ) model.generate(num_beams=4 , do_sample=a , early_stopping=a , num_return_sequences=3 ) def lowerCamelCase__ ( _a): return torch.tensor(_a , dtype=torch.long , device=_a) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def __UpperCamelCase ( self : Any ) -> Tuple: """simple docstring""" return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : str = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(a ) SCREAMING_SNAKE_CASE : List[str] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) SCREAMING_SNAKE_CASE : Union[str, Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) SCREAMING_SNAKE_CASE : Any = prepare_mam_aaa_inputs_dict(model.config , a , a ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**a )[0] SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , a ) # change to expected output here SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=a ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=a ) ) def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : int = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a ) # change to intended input SCREAMING_SNAKE_CASE : str = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) SCREAMING_SNAKE_CASE : Dict = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) SCREAMING_SNAKE_CASE : Any = prepare_mam_aaa_inputs_dict(model.config , a , a ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a )[0] SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , a ) # change to expected output here SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=a ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=a ) ) def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a ) SCREAMING_SNAKE_CASE : Tuple = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) SCREAMING_SNAKE_CASE : str = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a , padding=a , return_tensors="pt" ) SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate( input_ids=dct["input_ids"].to(a ) , attention_mask=dct["attention_mask"].to(a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) SCREAMING_SNAKE_CASE : str = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=a , skip_special_tokens=a ) assert generated == expected_en
25
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] ) lowerCAmelCase = np.array(_UpperCAmelCase ) lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = (1, 2, 1) lowerCAmelCase = (1, 1, 0, 7) lowerCAmelCase = SARIMAX( _UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase ) lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' ) lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] ) return result[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = regressor.predict(_UpperCAmelCase ) return y_pred[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): train_user.sort() lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 ) lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 ) lowerCAmelCase = qa - qa lowerCAmelCase = qa - (iqr * 0.1) return low_lim def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ): lowerCAmelCase = 0 lowerCAmelCase = 0 for i in list_vote: if i > actual_result: lowerCAmelCase = not_safe + 1 else: if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) __UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]] __UpperCamelCase : Any = pd.DataFrame( data_input, columns=['''total_user''', '''total_even''', '''days'''] ) __UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values) # split data __UpperCamelCase : Dict = normalize_df[:, 2].tolist() __UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist() __UpperCamelCase : List[str] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) __UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist() __UpperCamelCase : Tuple = x[: len(x) - 1] __UpperCamelCase : Any = x[len(x) - 1 :] # for linear regression & sarimax __UpperCamelCase : str = total_date[: len(total_date) - 1] __UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1] __UpperCamelCase : List[Any] = total_match[: len(total_match) - 1] __UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :] __UpperCamelCase : str = total_user[len(total_user) - 1 :] __UpperCamelCase : str = total_match[len(total_match) - 1 :] # voting system with forecasting __UpperCamelCase : Any = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data __UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not ''' print('''Today\'s data is {not_str}safe.''')
4
0
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCamelCase = 16 __UpperCamelCase = 32 def _a ( _lowerCamelCase , _lowerCamelCase = 16 ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __snake_case : Dict = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) __snake_case : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __snake_case : Optional[Any] = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. __snake_case : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __snake_case : int = 16 elif accelerator.mixed_precision != "no": __snake_case : Any = 8 else: __snake_case : List[Any] = None return tokenizer.pad( _lowerCamelCase , padding="""longest""" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. __snake_case : List[str] = DataLoader( tokenized_datasets["""train"""] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) __snake_case : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCamelCase = mocked_dataloaders # noqa: F811 def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _lowerCamelCase ) == "1": __snake_case : Optional[int] = 2 # Initialize accelerator __snake_case : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __snake_case : Union[str, Any] = config["""lr"""] __snake_case : Optional[int] = int(config["""num_epochs"""] ) __snake_case : Optional[int] = int(config["""seed"""] ) __snake_case : List[Any] = int(config["""batch_size"""] ) __snake_case : int = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_lowerCamelCase ) def inner_training_loop(_lowerCamelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __snake_case : str = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __snake_case : Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer __snake_case : int = AdamW(params=model.parameters() , lr=_lowerCamelCase ) __snake_case , __snake_case : Optional[int] = get_dataloaders(_lowerCamelCase , _lowerCamelCase ) # Instantiate scheduler __snake_case : Optional[Any] = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Now we train the model for epoch in range(_lowerCamelCase ): model.train() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __snake_case : Optional[int] = model(**_lowerCamelCase ) __snake_case : Optional[Any] = outputs.loss accelerator.backward(_lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __snake_case : Optional[Any] = model(**_lowerCamelCase ) __snake_case : List[Any] = outputs.logits.argmax(dim=-1 ) __snake_case , __snake_case : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=_lowerCamelCase , references=_lowerCamelCase , ) __snake_case : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , _lowerCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) __snake_case : Any = parser.parse_args() __snake_case : Any = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
26
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , ) lowerCAmelCase = parser.parse_args() return args def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): if not len(_UpperCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) lowerCAmelCase ,lowerCAmelCase = imgs[0].size lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) ) lowerCAmelCase ,lowerCAmelCase = grid.size for i, img in enumerate(_UpperCAmelCase ): grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ): lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase ) lowerCAmelCase = pipeline( _UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCamelCase : Optional[Any] = parse_args() # Load models and create wrapper for stable diffusion __UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id)) __UpperCamelCase : Optional[Any] = pipeline.to(unet.device) __UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
4
0
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase( __snake_case , unittest.TestCase ): '''simple docstring''' __magic_name__ = KandinskyVaaControlnetPipeline __magic_name__ = ['image_embeds', 'negative_image_embeds', 'hint'] __magic_name__ = ['image_embeds', 'negative_image_embeds', 'hint'] __magic_name__ = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __magic_name__ = False @property def lowerCAmelCase__ ( self ): return 32 @property def lowerCAmelCase__ ( self ): return 32 @property def lowerCAmelCase__ ( self ): return self.time_input_dim @property def lowerCAmelCase__ ( self ): return self.time_input_dim * 4 @property def lowerCAmelCase__ ( self ): return 100 @property def lowerCAmelCase__ ( self ): torch.manual_seed(0 ) _A = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _A = UNetaDConditionModel(**snake_case_ ) return model @property def lowerCAmelCase__ ( self ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase__ ( self ): torch.manual_seed(0 ) _A = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase__ ( self ): _A = self.dummy_unet _A = self.dummy_movq _A = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case_ , ) _A = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCAmelCase__ ( self , snake_case_ , snake_case_=0 ): _A = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case_ ) ).to(snake_case_ ) _A = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case_ ) # create hint _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ ) if str(snake_case_ ).startswith('mps' ): _A = torch.manual_seed(snake_case_ ) else: _A = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _A = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def lowerCAmelCase__ ( self ): _A = 'cpu' _A = self.get_dummy_components() _A = self.pipeline_class(**snake_case_ ) _A = pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) _A = pipe(**self.get_dummy_inputs(snake_case_ ) ) _A = output.images _A = pipe( **self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self ): _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' ) _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) _A = torch.from_numpy(np.array(snake_case_ ) ).float() / 255.0 _A = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) _A = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case_ ) _A = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa ) _A = pipeline.to(snake_case_ ) pipeline.set_progress_bar_config(disable=snake_case_ ) _A = 'A robot, 4k photo' _A = torch.Generator(device='cuda' ).manual_seed(0 ) _A, _A = pipe_prior( snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _A = torch.Generator(device='cuda' ).manual_seed(0 ) _A = pipeline( image_embeds=snake_case_ , negative_image_embeds=snake_case_ , hint=snake_case_ , generator=snake_case_ , num_inference_steps=100 , output_type='np' , ) _A = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(snake_case_ , snake_case_ )
27
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ): lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCamelCase : Optional[Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCamelCase : int = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ): try: lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(_UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ): lowerCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(_UpperCAmelCase , _UpperCAmelCase ): AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval() else: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}' lowerCAmelCase = teacher.config.to_diff_dict() try: lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_UpperCAmelCase ) # Copy weights lowerCAmelCase = teacher.config_class(**_UpperCAmelCase ) lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(_UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) if d_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) try: if hasattr( _UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) lowerCAmelCase = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(_UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
4
0
'''simple docstring''' from __future__ import annotations def lowercase__( __UpperCamelCase: dict ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = set(__UpperCamelCase ), [start] while stack: SCREAMING_SNAKE_CASE : Any = stack.pop() explored.add(__UpperCamelCase ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__UpperCamelCase ) return explored UpperCamelCase_ = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
28
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
0
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class __lowerCamelCase ( enum.Enum ): a__: Any = 0 a__: Tuple = 1 a__: Optional[Any] = 2 @add_end_docstrings(lowerCAmelCase ) class __lowerCamelCase ( lowerCAmelCase ): a__: Tuple = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self , *UpperCAmelCase , **UpperCAmelCase ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. lowerCamelCase_ = None if self.model.config.prefix is not None: lowerCamelCase_ = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. lowerCamelCase_ = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._sanitize_parameters(prefix=UpperCAmelCase , **self._forward_params ) lowerCamelCase_ = {**self._preprocess_params, **preprocess_params} lowerCamelCase_ = {**self._forward_params, **forward_params} def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ): lowerCamelCase_ = {} if prefix is not None: lowerCamelCase_ = prefix if prefix: lowerCamelCase_ = self.tokenizer( UpperCAmelCase , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework ) lowerCamelCase_ = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ''' [None, \'hole\']''' ) lowerCamelCase_ = handle_long_generation preprocess_params.update(UpperCAmelCase ) lowerCamelCase_ = generate_kwargs lowerCamelCase_ = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) lowerCamelCase_ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) lowerCamelCase_ = ReturnType.TENSORS if return_type is not None: lowerCamelCase_ = return_type if clean_up_tokenization_spaces is not None: lowerCamelCase_ = clean_up_tokenization_spaces if stop_sequence is not None: lowerCamelCase_ = self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) if len(UpperCAmelCase ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) lowerCamelCase_ = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*UpperCAmelCase , **UpperCAmelCase ) def __call__( self , UpperCAmelCase , **UpperCAmelCase ): return super().__call__(UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase=None , **UpperCAmelCase ): lowerCamelCase_ = self.tokenizer( prefix + prompt_text , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework ) lowerCamelCase_ = prompt_text if handle_long_generation == "hole": lowerCamelCase_ = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: lowerCamelCase_ = generate_kwargs['''max_new_tokens'''] else: lowerCamelCase_ = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: lowerCamelCase_ = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) lowerCamelCase_ = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: lowerCamelCase_ = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCAmelCase__ ( self , UpperCAmelCase , **UpperCAmelCase ): lowerCamelCase_ = model_inputs['''input_ids'''] lowerCamelCase_ = model_inputs.get('''attention_mask''' , UpperCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = 1 else: lowerCamelCase_ = input_ids.shape[0] lowerCamelCase_ = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. lowerCamelCase_ = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: lowerCamelCase_ = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: lowerCamelCase_ = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length lowerCamelCase_ = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL lowerCamelCase_ = self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) lowerCamelCase_ = generated_sequence.shape[0] if self.framework == "pt": lowerCamelCase_ = generated_sequence.reshape(UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": lowerCamelCase_ = tf.reshape(UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=ReturnType.FULL_TEXT , UpperCAmelCase=True ): lowerCamelCase_ = model_outputs['''generated_sequence'''][0] lowerCamelCase_ = model_outputs['''input_ids'''] lowerCamelCase_ = model_outputs['''prompt_text'''] lowerCamelCase_ = generated_sequence.numpy().tolist() lowerCamelCase_ = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: lowerCamelCase_ = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text lowerCamelCase_ = self.tokenizer.decode( UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: lowerCamelCase_ = 0 else: lowerCamelCase_ = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: lowerCamelCase_ = prompt_text + text[prompt_length:] else: lowerCamelCase_ = text[prompt_length:] lowerCamelCase_ = {'''generated_text''': all_text} records.append(UpperCAmelCase ) return records
29
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: if resistor <= 0: lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!' raise ValueError(_UpperCAmelCase ) first_sum += 1 / float(_UpperCAmelCase ) index += 1 return 1 / first_sum def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCAmelCase = F'Resistor at index {index} has a negative value!' raise ValueError(_UpperCAmelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
4
0
import unittest import numpy as np def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ): '''simple docstring''' UpperCAmelCase_ : Dict = np.shape(_lowercase ) UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase ) UpperCAmelCase_ : Tuple = np.shape(_lowercase ) if shape_a[0] != shape_b[0]: UpperCAmelCase_ : Tuple = ( '''Expected the same number of rows for A and B. ''' f'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(_lowercase ) if shape_b[1] != shape_c[1]: UpperCAmelCase_ : List[Any] = ( '''Expected the same number of columns for B and C. ''' f'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(_lowercase ) UpperCAmelCase_ : Dict = pseudo_inv if a_inv is None: try: UpperCAmelCase_ : Any = np.linalg.inv(_lowercase ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> None: UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] ) UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] ) UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s ) def a__ ( self ) -> None: UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> None: UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class a ( a__ ): snake_case__ = '''glpn''' def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = num_channels lowerCAmelCase = num_encoder_blocks lowerCAmelCase = depths lowerCAmelCase = sr_ratios lowerCAmelCase = hidden_sizes lowerCAmelCase = patch_sizes lowerCAmelCase = strides lowerCAmelCase = mlp_ratios lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = drop_path_rate lowerCAmelCase = layer_norm_eps lowerCAmelCase = decoder_hidden_size lowerCAmelCase = max_depth lowerCAmelCase = head_in_index
4
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCamelCase__ : int = None lowerCamelCase__ : Any = logging.get_logger(__name__) lowerCamelCase__ : str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase__ : Union[str, Any] = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCamelCase__ : Optional[Any] = { 'facebook/nllb-large-en-ro': 1_024, 'facebook/nllb-200-distilled-600M': 1_024, } # fmt: off lowerCamelCase__ : List[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = ["input_ids", "attention_mask"] lowercase_ = NllbTokenizer lowercase_ = [] lowercase_ = [] def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]="<s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : Any="</s>" , _lowerCAmelCase : Union[str, Any]="<s>" , _lowerCAmelCase : Dict="<unk>" , _lowerCAmelCase : int="<pad>" , _lowerCAmelCase : Optional[int]="<mask>" , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : List[str] , ): # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token SCREAMING_SNAKE_CASE_ = legacy_behaviour super().__init__( vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , legacy_behaviour=_lowerCAmelCase , **_lowerCAmelCase , ) SCREAMING_SNAKE_CASE_ = vocab_file SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True SCREAMING_SNAKE_CASE_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) SCREAMING_SNAKE_CASE_ = { lang_code: self.convert_tokens_to_ids(_lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } SCREAMING_SNAKE_CASE_ = src_lang if src_lang is not None else 'eng_Latn' SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(self._src_lang ) SCREAMING_SNAKE_CASE_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCAmelCase_ ( self : Tuple ): return self._src_lang @src_lang.setter def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : str ): SCREAMING_SNAKE_CASE_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] , _lowerCAmelCase : Optional[str] , **_lowerCAmelCase : Optional[Any] ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) SCREAMING_SNAKE_CASE_ = src_lang SCREAMING_SNAKE_CASE_ = self(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tgt_lang_id return inputs def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str = "eng_Latn" , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : str = "fra_Latn" , **_lowerCAmelCase : List[str] , ): SCREAMING_SNAKE_CASE_ = src_lang SCREAMING_SNAKE_CASE_ = tgt_lang return super().prepare_seqaseq_batch(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCAmelCase_ ( self : Any ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[Any] ): SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase ) if self.legacy_behaviour: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE_ = [self.cur_lang_code] SCREAMING_SNAKE_CASE_ = [self.eos_token_id] SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str ): SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase ) if self.legacy_behaviour: SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE_ = [self.cur_lang_code] SCREAMING_SNAKE_CASE_ = [self.eos_token_id] SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_lowerCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory." ) return SCREAMING_SNAKE_CASE_ = os.path.join( _lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ): copyfile(self.vocab_file , _lowerCAmelCase ) return (out_vocab_file,)
31
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = range_bbox def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase = bbox[i, j, 3] lowerCAmelCase = bbox[i, j, 1] lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase = bbox[i, j, 2] lowerCAmelCase = bbox[i, j, 0] lowerCAmelCase = t lowerCAmelCase = tf.convert_to_tensor(_snake_case ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = True snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def _SCREAMING_SNAKE_CASE (): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCAmelCase = outputs.loss lowerCAmelCase = (2,) self.assertEqual(loss.shape , _snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = (2, 2) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _snake_case ) self.assertEqual(outputs.end_logits.shape , _snake_case )
4
0
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) UpperCAmelCase_ = "\\n Text data.\n Second line of data." UpperCAmelCase_ = "file" @pytest.fixture(scope='''session''' ) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _UpperCAmelCase = bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' ) with zstd.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , '''w''' ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> str: """simple docstring""" _UpperCAmelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _UpperCAmelCase = input_paths[compression_format] _UpperCAmelCase = tmp_path / '''cache''' _UpperCAmelCase = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: _UpperCAmelCase = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: _UpperCAmelCase = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase = '''custom_cache''' _UpperCAmelCase = '''custom_extracted_dir''' _UpperCAmelCase = tmp_path / '''custom_extracted_path''' if default_extracted: _UpperCAmelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _UpperCAmelCase = xz_file _UpperCAmelCase = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def A__ ( SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path _UpperCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) # relative path _UpperCAmelCase = '''./__missing_file__.txt''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: """simple docstring""" _UpperCAmelCase = get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(SCREAMING_SNAKE_CASE_ ) as f: _UpperCAmelCase = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( ) -> Any: """simple docstring""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get('''https://huggingface.co''' , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get('''ftp://huggingface.co''' , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get('''s3://huggingface.co''' , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head('''s3://huggingface.co''' )
32
"""simple docstring""" import argparse import os import re import packaging.version __UpperCamelCase : Union[str, Any] = '''examples/''' __UpperCamelCase : str = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __UpperCamelCase : List[str] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __UpperCamelCase : Optional[int] = '''README.md''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.read() lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern] lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase ) lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '🤗 Transformers currently provides the following architectures' lowerCAmelCase = '1. Want to contribute a new model?' with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.readlines() # Find the start of the list. lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowerCAmelCase = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): with open(REPLACE_FILES['init'] , 'r' ) as f: lowerCAmelCase = f.read() lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ): lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: lowerCAmelCase = default_version.base_version elif patch: lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = default_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = get_version() lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0' lowerCAmelCase = current_version.base_version # Check with the user we got that right. lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = dev_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __UpperCamelCase : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
4
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ : int = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Dict = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCamelCase__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __UpperCamelCase : Optional[int] = pytest.mark.integration @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} ) return dset def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() lowerCAmelCase = dset.map( lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case ) lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch lowerCAmelCase = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} lowerCAmelCase = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_snake_case ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1] lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) self.assertRaises(_snake_case , index.search_batch , queries[0] ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCAmelCase = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_snake_case ): lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = faiss.IndexFlat(5 ) lowerCAmelCase = FaissIndex(custom_index=_snake_case ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: index.save(tmp_file.name ) lowerCAmelCase = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowerCAmelCase = 'index.faiss' lowerCAmelCase = F'mock://{index_name}' index.save(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = Elasticsearch() lowerCAmelCase = {'acknowledged': True} lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case ) # batched queries with timeout lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case )
4
0
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE_ = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } SCREAMING_SNAKE_CASE_ = { 'gpt-neox-20b': 2048, } class snake_case_ ( lowerCamelCase_ ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="<|endoftext|>" , lowerCamelCase_="<|endoftext|>" , lowerCamelCase_="<|endoftext|>" , lowerCamelCase_=False , **lowerCamelCase_ , ) -> List[str]: super().__init__( lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase_) != add_prefix_space: UpperCamelCase = getattr(lowerCamelCase_ , pre_tok_state.pop('''type''')) UpperCamelCase = add_prefix_space UpperCamelCase = pre_tok_class(**lowerCamelCase_) UpperCamelCase = add_prefix_space def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]: UpperCamelCase = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_) return tuple(lowerCamelCase_) def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[int]: UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_) + [self.eos_token_id]) if len(lowerCamelCase_) > self.model_max_length: UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
34
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a ( a__ , a__ , unittest.TestCase ): snake_case__ = IFInpaintingPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_dummy_components() def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCamelCase__ ( self ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
4
0
from argparse import ArgumentParser from .env import EnvironmentCommand def a ( ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' ) SCREAMING_SNAKE_CASE__ : int = parser.add_subparsers(help='''diffusers-cli command helpers''' ) # Register commands EnvironmentCommand.register_subcommand(A__ ) # Let's go SCREAMING_SNAKE_CASE__ : Any = parser.parse_args() if not hasattr(A__ , '''func''' ): parser.print_help() exit(1 ) # Run SCREAMING_SNAKE_CASE__ : List[str] = args.func(A__ ) service.run() if __name__ == "__main__": main()
35
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = self.vocab_size - 1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_snake_case ) return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @require_torch class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(_snake_case ) lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=_snake_case ) # the president is lowerCAmelCase = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case ) self.assertListEqual(output_ids[0].tolist() , _snake_case )
4
0
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class _A ( snake_case ): '''simple docstring''' __lowerCamelCase : torch.FloatTensor __lowerCamelCase : torch.FloatTensor class _A ( snake_case , snake_case ): '''simple docstring''' __lowerCamelCase : List[str] = 1 @register_to_config def __init__( self ,SCREAMING_SNAKE_CASE_ = 2000 ,SCREAMING_SNAKE_CASE_ = 0.15 ,SCREAMING_SNAKE_CASE_ = 0.01 ,SCREAMING_SNAKE_CASE_ = 13_48.0 ,SCREAMING_SNAKE_CASE_ = 1E-5 ,SCREAMING_SNAKE_CASE_ = 1 ,): '''simple docstring''' # standard deviation of the initial noise distribution snake_case : Union[str, Any] = sigma_max # setable values snake_case : Any = None self.set_sigmas(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' return sample def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' snake_case : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps snake_case : str = torch.linspace(1 ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,device=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' snake_case : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min snake_case : str = sigma_max if sigma_max is not None else self.config.sigma_max snake_case : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) snake_case : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) snake_case : List[Any] = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE_ ) ,math.log(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ) ) snake_case : Optional[Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return torch.where( timesteps == 0 ,torch.zeros_like(t.to(timesteps.device ) ) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,): '''simple docstring''' if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) snake_case : Optional[Any] = timestep * torch.ones( sample.shape[0] ,device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) snake_case : List[str] = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda snake_case : Dict = timesteps.to(self.discrete_sigmas.device ) snake_case : Any = self.discrete_sigmas[timesteps].to(sample.device ) snake_case : str = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ).to(sample.device ) snake_case : Tuple = torch.zeros_like(SCREAMING_SNAKE_CASE_ ) snake_case : Optional[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods snake_case : Tuple = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): snake_case : Dict = diffusion.unsqueeze(-1 ) snake_case : List[Any] = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of snake_case : Tuple = randn_tensor( sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE_ ,device=sample.device ,dtype=sample.dtype ) snake_case : Tuple = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? snake_case : List[str] = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE_ ,prev_sample_mean=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,): '''simple docstring''' if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction snake_case : Dict = randn_tensor(sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE_ ).to(sample.device ) # compute step size from the model_output, the noise, and the snr snake_case : List[str] = torch.norm(model_output.reshape(model_output.shape[0] ,-1 ) ,dim=-1 ).mean() snake_case : List[Any] = torch.norm(noise.reshape(noise.shape[0] ,-1 ) ,dim=-1 ).mean() snake_case : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 snake_case : int = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term snake_case : Optional[int] = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): snake_case : Union[str, Any] = step_size.unsqueeze(-1 ) snake_case : Optional[Any] = sample + step_size * model_output snake_case : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,): '''simple docstring''' # Make sure sigmas and timesteps have the same device and dtype as original_samples snake_case : Optional[Any] = timesteps.to(original_samples.device ) snake_case : Any = self.discrete_sigmas.to(original_samples.device )[timesteps] snake_case : Tuple = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(SCREAMING_SNAKE_CASE_ ) * sigmas[:, None, None, None] ) snake_case : Optional[Any] = noise + original_samples return noisy_samples def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
36
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __UpperCamelCase : str = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' ) lowerCAmelCase = parser.parse_args() logger.info(F'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(F'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: lowerCAmelCase = fp.readlines() logger.info('Start encoding' ) logger.info(F'{len(_UpperCAmelCase )} examples to process.' ) lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 1_0000 lowerCAmelCase = time.time() for text in data: lowerCAmelCase = F'{bos} {text.strip()} {sep}' lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) rslt.append(_UpperCAmelCase ) iter += 1 if iter % interval == 0: lowerCAmelCase = time.time() logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) lowerCAmelCase = time.time() logger.info('Finished binarization' ) logger.info(F'{len(_UpperCAmelCase )} examples processed.' ) lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle' lowerCAmelCase = tokenizer.vocab_size if vocab_size < (1 << 16): lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt] else: lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'Dump to {dp_file}' ) with open(_UpperCAmelCase , 'wb' ) as handle: pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
4
0
def UpperCamelCase_ ( __a , __a ) -> str: if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) a__ : Optional[Any] = str(bin(__a ) )[2:] # remove the leading "0b" a__ : Optional[int] = str(bin(__a ) )[2:] # remove the leading "0b" a__ : Optional[Any] = max(len(__a ) , len(__a ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(__a ) , b_binary.zfill(__a ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
37
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''bert''' def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = classifier_dropout class a ( a__ ): @property def UpperCamelCase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
4
0
'''simple docstring''' def UpperCamelCase__ ( __magic_name__ : int = 10 ) -> str: '''simple docstring''' if not isinstance(__magic_name__ , __magic_name__ ) or n < 0: raise ValueError("""Invalid input""" ) snake_case__ : List[Any] = 10**n snake_case__ : Tuple = 2_84_33 * (pow(2 , 7_83_04_57 , __magic_name__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F'{solution(10) = }')
38
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( a__ , unittest.TestCase ): snake_case__ = DanceDiffusionPipeline snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) lowerCAmelCase = IPNDMScheduler() lowerCAmelCase = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = DanceDiffusionPipeline(**_snake_case ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe(**_snake_case ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_local() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_optional_components() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_attention_slicing_forward_pass() def UpperCamelCase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
4
0
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = filter(lambda SCREAMING_SNAKE_CASE__ : p.requires_grad , model.parameters() ) snake_case_ = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ = logging.getLogger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if metric == "rouge2": snake_case_ = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": snake_case_ = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": snake_case_ = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ''' function.''' ) snake_case_ = ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return EarlyStopping( monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , ) class snake_case_ ( pl.Callback ): '''simple docstring''' def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) ->List[Any]: snake_case_ = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_UpperCamelCase ) @rank_zero_only def snake_case__( self : Union[str, Any] , _UpperCamelCase : pl.Trainer , _UpperCamelCase : pl.LightningModule , _UpperCamelCase : str , _UpperCamelCase : List[str]=True ) ->None: logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) snake_case_ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results snake_case_ = Path(pl_module.hparams.output_dir ) if type_path == "test": snake_case_ = od / '''test_results.txt''' snake_case_ = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. snake_case_ = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' snake_case_ = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=_UpperCamelCase ) generations_file.parent.mkdir(exist_ok=_UpperCamelCase ) with open(_UpperCamelCase , '''a+''' ) as writer: for key in sorted(_UpperCamelCase ): if key in ["log", "progress_bar", "preds"]: continue snake_case_ = metrics[key] if isinstance(_UpperCamelCase , torch.Tensor ): snake_case_ = val.item() snake_case_ = f'''{key}: {val:.6f}\n''' writer.write(_UpperCamelCase ) if not save_generations: return if "preds" in metrics: snake_case_ = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(_UpperCamelCase ) @rank_zero_only def snake_case__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : str ) ->Optional[Any]: try: snake_case_ = pl_module.model.model.num_parameters() except AttributeError: snake_case_ = pl_module.model.num_parameters() snake_case_ = count_trainable_parameters(_UpperCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def snake_case__( self : Tuple , _UpperCamelCase : pl.Trainer , _UpperCamelCase : pl.LightningModule ) ->str: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_UpperCamelCase , _UpperCamelCase , '''test''' ) @rank_zero_only def snake_case__( self : Tuple , _UpperCamelCase : pl.Trainer , _UpperCamelCase : Tuple ) ->Dict: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
39
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , ) lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenLlamaModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'single_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'multi_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowerCAmelCase = original_model(_snake_case ).last_hidden_state lowerCAmelCase = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
4
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : List[str] = "decision_transformer" UpperCAmelCase__ : Union[str, Any] = ["past_key_values"] UpperCAmelCase__ : Optional[int] = { "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, SCREAMING_SNAKE_CASE_=17, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=4096, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="relu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=5_0256, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> str: UpperCamelCase : Optional[int] = state_dim UpperCamelCase : Optional[int] = act_dim UpperCamelCase : Tuple = hidden_size UpperCamelCase : int = max_ep_len UpperCamelCase : List[str] = action_tanh UpperCamelCase : Dict = vocab_size UpperCamelCase : Optional[int] = n_positions UpperCamelCase : List[str] = n_layer UpperCamelCase : Optional[int] = n_head UpperCamelCase : Optional[Any] = n_inner UpperCamelCase : Any = activation_function UpperCamelCase : Any = resid_pdrop UpperCamelCase : List[str] = embd_pdrop UpperCamelCase : str = attn_pdrop UpperCamelCase : List[str] = layer_norm_epsilon UpperCamelCase : Tuple = initializer_range UpperCamelCase : int = scale_attn_weights UpperCamelCase : Optional[int] = use_cache UpperCamelCase : Dict = scale_attn_by_inverse_layer_idx UpperCamelCase : Tuple = reorder_and_upcast_attn UpperCamelCase : Union[str, Any] = bos_token_id UpperCamelCase : Optional[int] = eos_token_id super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
40
"""simple docstring""" from typing import Any class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = data lowerCAmelCase = None def __repr__( self ): """simple docstring""" return F'Node({self.data})' class a : def __init__( self ): """simple docstring""" lowerCAmelCase = None def __iter__( self ): """simple docstring""" lowerCAmelCase = self.head while node: yield node.data lowerCAmelCase = node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(_snake_case ) for item in self] ) def __getitem__( self , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) lowerCAmelCase = self.head for _ in range(_snake_case ): lowerCAmelCase = current.next lowerCAmelCase = data def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(len(self ) , _snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(0 , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) lowerCAmelCase = Node(_snake_case ) if self.head is None: lowerCAmelCase = new_node elif index == 0: lowerCAmelCase = self.head # link new_node to head lowerCAmelCase = new_node else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = new_node def UpperCamelCase__ ( self ): # print every node data """simple docstring""" print(self ) def UpperCamelCase__ ( self ): """simple docstring""" return self.delete_nth(0 ) def UpperCamelCase__ ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def UpperCamelCase__ ( self , _snake_case = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) lowerCAmelCase = self.head # default first node if index == 0: lowerCAmelCase = self.head.next else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = temp.next.next return delete_node.data def UpperCamelCase__ ( self ): """simple docstring""" return self.head is None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = None lowerCAmelCase = self.head while current: # Store the current node's next node. lowerCAmelCase = current.next # Make the current node's next point backwards lowerCAmelCase = prev # Make the previous node be the current node lowerCAmelCase = current # Make the current node the next node (to progress iteration) lowerCAmelCase = next_node # Return prev in order to put the head at the end lowerCAmelCase = prev def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = LinkedList() assert linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_UpperCAmelCase ) == i linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_UpperCAmelCase ) == 9 assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [ -9, 100, Node(7734_5112 ), 'dlrow olleH', 7, 5555, 0, -192.5_5555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] lowerCAmelCase = LinkedList() for i in test_input: linked_list.insert_tail(_UpperCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase = linked_list.delete_head() assert result == -9 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' ) ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_UpperCAmelCase ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_UpperCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _SCREAMING_SNAKE_CASE (): from doctest import testmod testmod() lowerCAmelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_UpperCAmelCase ) print('\nReading/changing Node data using indexing:' ) print(F'Element at Position 1: {linked_list[1]}' ) lowerCAmelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_UpperCAmelCase ) print(F'length of linked_list is : {len(_UpperCAmelCase )}' ) if __name__ == "__main__": main()
4
0
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
41
"""simple docstring""" from __future__ import annotations import requests def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(_UpperCAmelCase ).json() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories] return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase ) return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A_ = { "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
42
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 48 lowerCAmelCase = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 60 lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = 126 lowerCAmelCase = 7 lowerCAmelCase = 255.0 lowerCAmelCase = '' return config def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ): if "patch_embed.proj" in name and "layers" not in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: lowerCAmelCase = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCAmelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCAmelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCAmelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: lowerCAmelCase = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: lowerCAmelCase = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: lowerCAmelCase = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": lowerCAmelCase = 'layernorm.weight' if name == "norm.bias": lowerCAmelCase = 'layernorm.bias' if "conv_first" in name: lowerCAmelCase = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCAmelCase = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' ) lowerCAmelCase = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: lowerCAmelCase = 'swin2sr.' + name return name def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ): for key in orig_state_dict.copy().keys(): lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: lowerCAmelCase = key.split('.' ) lowerCAmelCase = int(key_split[1] ) lowerCAmelCase = int(key_split[4] ) lowerCAmelCase = config.embed_dim if "weight" in key: lowerCAmelCase = val[:dim, :] lowerCAmelCase = val[dim : dim * 2, :] lowerCAmelCase = val[-dim:, :] else: lowerCAmelCase = val[:dim] lowerCAmelCase = val[dim : dim * 2] lowerCAmelCase = val[-dim:] pass else: lowerCAmelCase = val return orig_state_dict def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ): lowerCAmelCase = get_config(_UpperCAmelCase ) lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase ) model.eval() lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' ) lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) lowerCAmelCase = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256 lowerCAmelCase = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCAmelCase = model(_UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 ) print('Looks ok!' ) lowerCAmelCase = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } lowerCAmelCase = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') __UpperCamelCase : Optional[int] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class _a ( UpperCamelCase__ ): _lowercase : Any = '''big_bird''' def __init__( self: List[str] , UpperCamelCase_: List[Any]=50_358 , UpperCamelCase_: Dict=768 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[int]=3_072 , UpperCamelCase_: Tuple="gelu_new" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=4_096 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: int=1E-1_2 , UpperCamelCase_: Dict=True , UpperCamelCase_: Tuple=0 , UpperCamelCase_: Any=1 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: List[str]=66 , UpperCamelCase_: Any="block_sparse" , UpperCamelCase_: int=True , UpperCamelCase_: Dict=False , UpperCamelCase_: List[Any]=64 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: Union[str, Any] , ) -> List[str]: """simple docstring""" super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , sep_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = type_vocab_size lowercase__ = layer_norm_eps lowercase__ = use_cache lowercase__ = rescale_embeddings lowercase__ = attention_type lowercase__ = use_bias lowercase__ = block_size lowercase__ = num_random_blocks lowercase__ = classifier_dropout class _a ( UpperCamelCase__ ): @property def lowerCamelCase_ ( self: str ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase__ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
43
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''megatron-bert''' def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache
4
0
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase__ : def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],): _lowerCamelCase : List[Any] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Optional[int] = image_size _lowerCamelCase : int = patch_size _lowerCamelCase : Optional[Any] = num_channels _lowerCamelCase : int = embed_dim _lowerCamelCase : int = hidden_sizes _lowerCamelCase : List[Any] = depths _lowerCamelCase : Any = num_heads _lowerCamelCase : List[str] = window_size _lowerCamelCase : str = mlp_ratio _lowerCamelCase : Any = qkv_bias _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : List[str] = drop_path_rate _lowerCamelCase : str = hidden_act _lowerCamelCase : Union[str, Any] = use_absolute_embeddings _lowerCamelCase : List[Any] = patch_norm _lowerCamelCase : Tuple = layer_norm_eps _lowerCamelCase : str = initializer_range _lowerCamelCase : Optional[int] = is_training _lowerCamelCase : Tuple = scope _lowerCamelCase : List[Any] = use_labels _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : Tuple = encoder_stride _lowerCamelCase : Any = out_features _lowerCamelCase : Any = out_indices def lowerCamelCase_ ( self : Any ): _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : List[Any] = None if self.use_labels: _lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size ) _lowerCamelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Union[str, Any] ): return FocalNetConfig( image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,) def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ): _lowerCamelCase : Optional[Any] = FocalNetModel(config=__A ) model.to(__A ) model.eval() _lowerCamelCase : Optional[Any] = model(__A ) _lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ): _lowerCamelCase : Any = FocalNetBackbone(config=__A ) model.to(__A ) model.eval() _lowerCamelCase : List[str] = model(__A ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ),len(config.out_features ) ) self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowerCamelCase : List[str] = None _lowerCamelCase : List[str] = FocalNetBackbone(config=__A ) model.to(__A ) model.eval() _lowerCamelCase : str = model(__A ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ),1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ),1 ) self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ): _lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A ) model.to(__A ) model.eval() _lowerCamelCase : List[str] = model(__A ) self.parent.assertEqual( result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCamelCase : Dict = 1 _lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A ) model.to(__A ) model.eval() _lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : Optional[int] = model(__A ) self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ): _lowerCamelCase : Union[str, Any] = self.type_sequence_label_size _lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A ) model.to(__A ) model.eval() _lowerCamelCase : Optional[int] = model(__A,labels=__A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCamelCase : str = 1 _lowerCamelCase : str = FocalNetForImageClassification(__A ) model.to(__A ) model.eval() _lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : List[Any] = model(__A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ ( self : Optional[int] ): _lowerCamelCase : int = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs _lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A , A , unittest.TestCase ): lowerCAmelCase_ = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCAmelCase_ = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCamelCase_ ( self : int ): _lowerCamelCase : Optional[int] = FocalNetModelTester(self ) _lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A ) def lowerCamelCase_ ( self : Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : List[str] ): return def lowerCamelCase_ ( self : Any ): _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def lowerCamelCase_ ( self : int ): _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__A ) def lowerCamelCase_ ( self : Union[str, Any] ): _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__A ) def lowerCamelCase_ ( self : int ): _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @unittest.skip(reason="FocalNet does not use inputs_embeds" ) def lowerCamelCase_ ( self : Optional[int] ): pass @unittest.skip(reason="FocalNet does not use feedforward chunking" ) def lowerCamelCase_ ( self : List[str] ): pass def lowerCamelCase_ ( self : List[str] ): _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCamelCase : str = model_class(__A ) self.assertIsInstance(model.get_input_embeddings(),(nn.Module) ) _lowerCamelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A,nn.Linear ) ) def lowerCamelCase_ ( self : List[Any] ): _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCamelCase : Union[str, Any] = model_class(__A ) _lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : int = [*signature.parameters.keys()] _lowerCamelCase : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1],__A ) def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ): _lowerCamelCase : Union[str, Any] = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) ) _lowerCamelCase : Optional[int] = outputs.hidden_states _lowerCamelCase : int = getattr( self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__A ),__A ) # FocalNet has a different seq_length _lowerCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],) _lowerCamelCase : Any = outputs.reshaped_hidden_states self.assertEqual(len(__A ),__A ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape _lowerCamelCase : List[str] = ( reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],) def lowerCamelCase_ ( self : Union[str, Any] ): _lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCamelCase : List[Any] = True self.check_hidden_states_output(__A,__A,__A,__A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : List[Any] = True self.check_hidden_states_output(__A,__A,__A,__A ) def lowerCamelCase_ ( self : Optional[Any] ): _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Tuple = 3 _lowerCamelCase : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCamelCase : Tuple = ( config.patch_size if isinstance(config.patch_size,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCamelCase : List[Any] = True self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Optional[Any] = True self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) ) @slow def lowerCamelCase_ ( self : Tuple ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def lowerCamelCase_ ( self : Tuple ): _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[Any] = _config_zero_init(__A ) for model_class in self.all_model_classes: _lowerCamelCase : Any = model_class(config=__A ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',) @require_vision @require_torch class UpperCAmelCase__ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Union[str, Any] ): # TODO update organization return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Union[str, Any] ): _lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A ) # forward pass with torch.no_grad(): _lowerCamelCase : Dict = model(**__A ) # verify the logits _lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape,__A ) _lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item(),2_8_1 ) @require_torch class UpperCAmelCase__ ( A , unittest.TestCase ): lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else () lowerCAmelCase_ = FocalNetConfig lowerCAmelCase_ = False def lowerCamelCase_ ( self : int ): _lowerCamelCase : int = FocalNetModelTester(self )
44
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
4
0
def A ( lowercase__ : int ) -> str: if number > 0: raise ValueError("""input must be a negative integer""" ) UpperCamelCase__ :List[Any] = len(bin(lowercase__ )[3:] ) UpperCamelCase__ :str = bin(abs(lowercase__ ) - (1 << binary_number_length) )[3:] UpperCamelCase__ :List[Any] = ( ( """1""" + """0""" * (binary_number_length - len(lowercase__ )) + twos_complement_number ) if number < 0 else """0""" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
45
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a ( a__ ): snake_case__ = 42 class a ( a__ , a__ ): @register_to_config def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder lowerCAmelCase = Encoder( in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , ) lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case ) lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) # pass init params to Decoder lowerCAmelCase = Decoder( in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = self.encoder(_snake_case ) lowerCAmelCase = self.quant_conv(_snake_case ) if not return_dict: return (h,) return VQEncoderOutput(latents=_snake_case ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ): """simple docstring""" if not force_not_quantize: lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case ) else: lowerCAmelCase = h lowerCAmelCase = self.post_quant_conv(_snake_case ) lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = sample lowerCAmelCase = self.encode(_snake_case ).latents lowerCAmelCase = self.decode(_snake_case ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case )
4
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : Any = { '''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''], '''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''], '''processing_whisper''': ['''WhisperProcessor'''], '''tokenization_whisper''': ['''WhisperTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = ['''WhisperTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ '''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WhisperForConditionalGeneration''', '''WhisperModel''', '''WhisperPreTrainedModel''', '''WhisperForAudioClassification''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ '''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWhisperForConditionalGeneration''', '''TFWhisperModel''', '''TFWhisperPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[str] = [ '''FlaxWhisperForConditionalGeneration''', '''FlaxWhisperModel''', '''FlaxWhisperPreTrainedModel''', '''FlaxWhisperForAudioClassification''', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
46
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __UpperCamelCase : Optional[Any] = tuple[int, int] class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = vertices lowerCAmelCase = { (min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items() } def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCAmelCase = weight def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Graph({min(self.vertices )} , {} ) lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): lowerCAmelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCAmelCase = edge lowerCAmelCase = weight subgraph.add_edge(_snake_case , _snake_case ) return subgraph def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ): lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) ) lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = {} lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read().strip().split('\n' ) lowerCAmelCase = [line.split(',' ) for line in data] for edgea in range(1 , len(_UpperCAmelCase ) ): for edgea in range(_UpperCAmelCase ): if adjaceny_matrix[edgea][edgea] != "-": lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] ) lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase ) lowerCAmelCase = graph.prims_algorithm() lowerCAmelCase = sum(graph.edges.values() ) lowerCAmelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
4
0
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : List[str]=1_0 ): __a : Optional[Any] = [] for _ in range(lowerCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Tuple=1_0 ): __a : Union[str, Any] = [] for step in range(lowerCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: __a : List[Any] = os.path.join(lowerCamelCase_ , 'schedule.bin' ) torch.save(scheduler.state_dict() , lowerCamelCase_ ) __a : Tuple = torch.load(lowerCamelCase_ ) scheduler.load_state_dict(lowerCamelCase_ ) return lrs @require_torch class _UpperCamelCase( unittest.TestCase ): def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ ) __a : List[str] = torch.tensor([0.4, 0.2, -0.5] ) __a : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __a : List[Any] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): __a : int = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' __a : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) __a : Union[str, Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __a : int = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE__ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE__ , scale_parameter=SCREAMING_SNAKE_CASE__ , warmup_init=SCREAMING_SNAKE_CASE__ , ) for _ in range(1_0_0_0 ): __a : str = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class _UpperCamelCase( unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None __SCREAMING_SNAKE_CASE : str = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __SCREAMING_SNAKE_CASE : str = 10 def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict=None ): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ , msg=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : str = {'num_warmup_steps': 2, 'num_training_steps': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) __a : Any = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): __a , __a : str = data __a : Tuple = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) __a : List[Any] = unwrap_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps ) self.assertListAlmostEqual( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) __a : Dict = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE__ ) # wrap to test picklability of the schedule __a : Optional[int] = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , msg=f'''failed for {scheduler_func} in save and reload''' ) class _UpperCamelCase: def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' __a : str = fn def __call__( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' return self.fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @classmethod def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' __a : List[Any] = list(map(self , scheduler.lr_lambdas ) )
47
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] ) lowerCAmelCase = np.array(_UpperCAmelCase ) lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = (1, 2, 1) lowerCAmelCase = (1, 1, 0, 7) lowerCAmelCase = SARIMAX( _UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase ) lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' ) lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] ) return result[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = regressor.predict(_UpperCAmelCase ) return y_pred[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): train_user.sort() lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 ) lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 ) lowerCAmelCase = qa - qa lowerCAmelCase = qa - (iqr * 0.1) return low_lim def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ): lowerCAmelCase = 0 lowerCAmelCase = 0 for i in list_vote: if i > actual_result: lowerCAmelCase = not_safe + 1 else: if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) __UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]] __UpperCamelCase : Any = pd.DataFrame( data_input, columns=['''total_user''', '''total_even''', '''days'''] ) __UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values) # split data __UpperCamelCase : Dict = normalize_df[:, 2].tolist() __UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist() __UpperCamelCase : List[str] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) __UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist() __UpperCamelCase : Tuple = x[: len(x) - 1] __UpperCamelCase : Any = x[len(x) - 1 :] # for linear regression & sarimax __UpperCamelCase : str = total_date[: len(total_date) - 1] __UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1] __UpperCamelCase : List[Any] = total_match[: len(total_match) - 1] __UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :] __UpperCamelCase : str = total_user[len(total_user) - 1 :] __UpperCamelCase : str = total_match[len(total_match) - 1 :] # voting system with forecasting __UpperCamelCase : Any = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data __UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not ''' print('''Today\'s data is {not_str}safe.''')
4
0
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class A : def __init__( self : Dict , __magic_name__ : Collection[float] | None = None ): """simple docstring""" if components is None: lowerCAmelCase__ = [] lowerCAmelCase__ = list(__magic_name__ ) def __len__( self : Optional[Any] ): """simple docstring""" return len(self.__components ) def __str__( self : Optional[Any] ): """simple docstring""" return "(" + ",".join(map(__magic_name__ , self.__components ) ) + ")" def __add__( self : Dict , __magic_name__ : Vector ): """simple docstring""" lowerCAmelCase__ = len(self ) if size == len(__magic_name__ ): lowerCAmelCase__ = [self.__components[i] + other.component(__magic_name__ ) for i in range(__magic_name__ )] return Vector(__magic_name__ ) else: raise Exception("must have the same size" ) def __sub__( self : Tuple , __magic_name__ : Vector ): """simple docstring""" lowerCAmelCase__ = len(self ) if size == len(__magic_name__ ): lowerCAmelCase__ = [self.__components[i] - other.component(__magic_name__ ) for i in range(__magic_name__ )] return Vector(__magic_name__ ) else: # error case raise Exception("must have the same size" ) @overload def __mul__( self : Optional[Any] , __magic_name__ : float ): """simple docstring""" ... @overload def __mul__( self : Union[str, Any] , __magic_name__ : Vector ): """simple docstring""" ... def __mul__( self : Union[str, Any] , __magic_name__ : float | Vector ): """simple docstring""" if isinstance(__magic_name__ , (float, int) ): lowerCAmelCase__ = [c * other for c in self.__components] return Vector(__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ) and len(self ) == len(__magic_name__ ): lowerCAmelCase__ = len(self ) lowerCAmelCase__ = [self.__components[i] * other.component(__magic_name__ ) for i in range(__magic_name__ )] return sum(__magic_name__ ) else: # error case raise Exception("invalid operand!" ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return Vector(self.__components ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception("index out of range" ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : int , __magic_name__ : float ): """simple docstring""" assert -len(self.__components ) <= pos < len(self.__components ) lowerCAmelCase__ = value def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" if len(self.__components ) == 0: raise Exception("Vector is empty" ) lowerCAmelCase__ = [c**2 for c in self.__components] return math.sqrt(sum(__magic_name__ ) ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Vector , __magic_name__ : bool = False ): """simple docstring""" lowerCAmelCase__ = self * other lowerCAmelCase__ = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def A ( UpperCamelCase_ : int ) -> Vector: '''simple docstring''' assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) return Vector([0] * dimension ) def A ( UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Vector: '''simple docstring''' assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) and (isinstance(UpperCamelCase_ , UpperCamelCase_ )) lowerCAmelCase__ = [0] * dimension lowerCAmelCase__ = 1 return Vector(UpperCamelCase_ ) def A ( UpperCamelCase_ : float , UpperCamelCase_ : Vector , UpperCamelCase_ : Vector ) -> Vector: '''simple docstring''' assert ( isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ) and (isinstance(UpperCamelCase_ , (int, float) )) ) return x * scalar + y def A ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Vector: '''simple docstring''' random.seed(UpperCamelCase_ ) lowerCAmelCase__ = [random.randint(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )] return Vector(UpperCamelCase_ ) class A : def __init__( self : Dict , __magic_name__ : list[list[float]] , __magic_name__ : int , __magic_name__ : int ): """simple docstring""" lowerCAmelCase__ = matrix lowerCAmelCase__ = w lowerCAmelCase__ = h def __str__( self : str ): """simple docstring""" lowerCAmelCase__ = "" for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self : int , __magic_name__ : Matrix ): """simple docstring""" if self.__width == other.width() and self.__height == other.height(): lowerCAmelCase__ = [] for i in range(self.__height ): lowerCAmelCase__ = [ self.__matrix[i][j] + other.component(__magic_name__ , __magic_name__ ) for j in range(self.__width ) ] matrix.append(__magic_name__ ) return Matrix(__magic_name__ , self.__width , self.__height ) else: raise Exception("matrix must have the same dimension!" ) def __sub__( self : Dict , __magic_name__ : Matrix ): """simple docstring""" if self.__width == other.width() and self.__height == other.height(): lowerCAmelCase__ = [] for i in range(self.__height ): lowerCAmelCase__ = [ self.__matrix[i][j] - other.component(__magic_name__ , __magic_name__ ) for j in range(self.__width ) ] matrix.append(__magic_name__ ) return Matrix(__magic_name__ , self.__width , self.__height ) else: raise Exception("matrices must have the same dimension!" ) @overload def __mul__( self : Optional[Any] , __magic_name__ : float ): """simple docstring""" ... @overload def __mul__( self : Optional[Any] , __magic_name__ : Vector ): """simple docstring""" ... def __mul__( self : Optional[int] , __magic_name__ : float | Vector ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): # matrix-vector if len(__magic_name__ ) == self.__width: lowerCAmelCase__ = zero_vector(self.__height ) for i in range(self.__height ): lowerCAmelCase__ = [ self.__matrix[i][j] * other.component(__magic_name__ ) for j in range(self.__width ) ] ans.change_component(__magic_name__ , sum(__magic_name__ ) ) return ans else: raise Exception( "vector must have the same size as the " "number of columns of the matrix!" ) elif isinstance(__magic_name__ , (int, float) ): # matrix-scalar lowerCAmelCase__ = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(__magic_name__ , self.__width , self.__height ) return None def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return self.__height def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" return self.__width def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ): """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("change_component: indices out of bounds" ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float ): """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: lowerCAmelCase__ = value else: raise Exception("change_component: indices out of bounds" ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int , __magic_name__ : int ): """simple docstring""" if self.__height != self.__width: raise Exception("Matrix is not square" ) lowerCAmelCase__ = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__magic_name__ ) ): lowerCAmelCase__ = minor[i][:y] + minor[i][y + 1 :] return Matrix(__magic_name__ , self.__width - 1 , self.__height - 1 ).determinant() def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : int , __magic_name__ : int ): """simple docstring""" if self.__height != self.__width: raise Exception("Matrix is not square" ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__magic_name__ , __magic_name__ ) else: raise Exception("Indices out of bounds" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if self.__height != self.__width: raise Exception("Matrix is not square" ) if self.__height < 1: raise Exception("Matrix has no element" ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: lowerCAmelCase__ = [ self.__matrix[0][y] * self.cofactor(0 , __magic_name__ ) for y in range(self.__width ) ] return sum(__magic_name__ ) def A ( UpperCamelCase_ : int ) -> Matrix: '''simple docstring''' lowerCAmelCase__ = [[0] * n for _ in range(UpperCamelCase_ )] return Matrix(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def A ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Matrix: '''simple docstring''' random.seed(UpperCamelCase_ ) lowerCAmelCase__ = [ [random.randint(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )] for _ in range(UpperCamelCase_ ) ] return Matrix(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
48
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , ) lowerCAmelCase = parser.parse_args() return args def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): if not len(_UpperCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) lowerCAmelCase ,lowerCAmelCase = imgs[0].size lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) ) lowerCAmelCase ,lowerCAmelCase = grid.size for i, img in enumerate(_UpperCAmelCase ): grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ): lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase ) lowerCAmelCase = pipeline( _UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCamelCase : Optional[Any] = parse_args() # Load models and create wrapper for stable diffusion __UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id)) __UpperCamelCase : Optional[Any] = pipeline.to(unet.device) __UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
4
0
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _UpperCAmelCase : def __init__( self : Tuple ): __UpperCAmelCase = '''''' __UpperCAmelCase = '''''' __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = 2_56 __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = 0 def a ( self : List[Any] , _lowercase : List[Any] ): __UpperCAmelCase = cva.imread(_lowercase , 0 ) __UpperCAmelCase = copy.deepcopy(self.img ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='''x''' ) __UpperCAmelCase = np.sum(_lowercase ) for i in range(len(_lowercase ) ): __UpperCAmelCase = x[i] / self.k self.sk += prk __UpperCAmelCase = (self.L - 1) * self.sk if self.rem != 0: __UpperCAmelCase = int(last % last ) __UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_lowercase ) __UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) __UpperCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): __UpperCAmelCase = self.img[j][i] if num != self.last_list[num]: __UpperCAmelCase = self.last_list[num] cva.imwrite('''output_data/output.jpg''' , self.img ) def a ( self : Tuple ): plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def a ( self : Union[str, Any] ): cva.imshow('''Output-Image''' , self.img ) cva.imshow('''Input-Image''' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": _lowercase : Optional[int] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg') _lowercase : Union[str, Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
49
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ): lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCamelCase : Optional[Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCamelCase : int = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ): try: lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(_UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ): lowerCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(_UpperCAmelCase , _UpperCAmelCase ): AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval() else: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}' lowerCAmelCase = teacher.config.to_diff_dict() try: lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_UpperCAmelCase ) # Copy weights lowerCAmelCase = teacher.config_class(**_UpperCAmelCase ) lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(_UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) if d_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) try: if hasattr( _UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) lowerCAmelCase = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(_UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
4
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
0
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =VQModel _lowerCamelCase ="sample" @property def __snake_case ( self : List[str] , a__ : Optional[int]=(32, 32) ): UpperCAmelCase = 4 UpperCAmelCase = 3 UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(a__ ) return {"sample": image} @property def __snake_case ( self : Any ): return (3, 32, 32) @property def __snake_case ( self : Dict ): return (3, 32, 32) def __snake_case ( self : Dict ): UpperCAmelCase = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def __snake_case ( self : List[str] ): pass def __snake_case ( self : List[Any] ): pass def __snake_case ( self : int ): UpperCAmelCase, UpperCAmelCase = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=a__ ) self.assertIsNotNone(a__ ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(a__ ) UpperCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def __snake_case ( self : List[str] ): UpperCAmelCase = VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(a__ ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) UpperCAmelCase = image.to(a__ ) with torch.no_grad(): UpperCAmelCase = model(a__ ).sample UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] ) # fmt: on self.assertTrue(torch.allclose(a__ , a__ , atol=1e-3 ) )
51
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: if resistor <= 0: lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!' raise ValueError(_UpperCAmelCase ) first_sum += 1 / float(_UpperCAmelCase ) index += 1 return 1 / first_sum def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCAmelCase = F'Resistor at index {index} has a negative value!' raise ValueError(_UpperCAmelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
4
0
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch A = random.Random() def __A ( a_ :List[str] , a_ :int=1.0 , a_ :Optional[Any]=None , a_ :int=None) -> List[Any]: if rng is None: __a : int = global_rng __a : Optional[Any] = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch class __lowercase ( unittest.TestCase ): '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=16000 , _UpperCAmelCase=True , _UpperCAmelCase=80 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase="hann_window" , _UpperCAmelCase=80 , _UpperCAmelCase=7600 , _UpperCAmelCase=1e-1_0 , _UpperCAmelCase=True , ): __a : Optional[Any] = parent __a : int = batch_size __a : Optional[int] = min_seq_length __a : Any = max_seq_length __a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __a : Union[str, Any] = feature_size __a : Optional[int] = padding_value __a : int = sampling_rate __a : str = do_normalize __a : int = num_mel_bins __a : Dict = hop_length __a : Dict = win_length __a : Dict = win_function __a : Optional[Any] = fmin __a : Union[str, Any] = fmax __a : Tuple = mel_floor __a : Optional[Any] = return_attention_mask def _lowerCamelCase ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ): def _flatten(_UpperCAmelCase ): return list(itertools.chain(*_UpperCAmelCase ) ) if equal_length: __a : Optional[int] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __a : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a : Any = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ): if equal_length: __a : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __a : Optional[int] = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __a : List[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch class __lowercase ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = SpeechTaFeatureExtractor def _lowerCamelCase ( self ): __a : Union[str, Any] = SpeechTaFeatureExtractionTester(self ) def _lowerCamelCase ( self , _UpperCAmelCase ): self.assertTrue(np.all(np.mean(_UpperCAmelCase , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) ) def _lowerCamelCase ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test not batched input __a : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values __a : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) ) # Test batched __a : Any = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values __a : List[Any] = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) ) def _lowerCamelCase ( self ): __a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad'''] __a : Tuple = [None, 1600, None] for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ): __a : Union[str, Any] = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''np''' ) __a : str = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def _lowerCamelCase ( self ): __a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a : Any = range(800 , 1400 , 200 ) __a : Dict = [floats_list((1, x) )[0] for x in lengths] __a : int = ['''longest''', '''max_length''', '''do_not_pad'''] __a : Any = [None, 1600, None] for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ): __a : int = feat_extract(_UpperCAmelCase , max_length=_UpperCAmelCase , padding=_UpperCAmelCase ) __a : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def _lowerCamelCase ( self ): __a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a : int = feat_extract( _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=1000 , padding='''max_length''' , return_tensors='''np''' ) __a : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def _lowerCamelCase ( self ): __a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a : List[str] = feat_extract( _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=1000 , padding='''longest''' , return_tensors='''np''' ) __a : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) __a : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a : Optional[int] = feat_extract( _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=2000 , padding='''longest''' , return_tensors='''np''' ) __a : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def _lowerCamelCase ( self ): __a : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __a : Optional[int] = np.random.rand(100 ).astype(np.floataa ) __a : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __a : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __a : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _lowerCamelCase ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __a : Tuple = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test feature size __a : Union[str, Any] = feature_extractor(audio_target=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input __a : Tuple = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values __a : int = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) ) # Test batched __a : Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values __a : Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. __a : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] __a : str = np.asarray(_UpperCAmelCase ) __a : List[str] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values __a : str = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) ) def _lowerCamelCase ( self ): __a : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target() __a : int = self.feature_extraction_class(**self.feat_extract_dict ) __a : Tuple = feat_extract.model_input_names[0] __a : int = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) ) __a : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase ) __a : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) __a : Any = processed_features[input_name] if len(batch_features_input.shape ) < 3: __a : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _lowerCamelCase ( self ): __a : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase ) __a : Dict = self.feature_extraction_class(**self.feat_extract_dict ) __a : List[str] = feat_extract.model_input_names[0] __a : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) __a : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __a : List[str] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _lowerCamelCase ( self ): __a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) __a : str = self.feat_extract_tester.prepare_inputs_for_target() __a : Tuple = feat_extract.model_input_names[0] __a : Optional[int] = BatchFeature({input_name: speech_inputs} ) __a : int = feat_extract.num_mel_bins # hack! __a : Any = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name] __a : Optional[int] = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def _lowerCamelCase ( self ): __a : Union[str, Any] = self.feat_extract_dict __a : str = True __a : Dict = self.feature_extraction_class(**_UpperCAmelCase ) __a : int = self.feat_extract_tester.prepare_inputs_for_target() __a : Optional[Any] = [len(_UpperCAmelCase ) for x in speech_inputs] __a : Any = feat_extract.model_input_names[0] __a : Dict = BatchFeature({input_name: speech_inputs} ) __a : Any = feat_extract.num_mel_bins # hack! __a : Tuple = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _UpperCAmelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Any = self.feat_extract_dict __a : Dict = True __a : List[str] = self.feature_extraction_class(**_UpperCAmelCase ) __a : str = self.feat_extract_tester.prepare_inputs_for_target() __a : Optional[int] = [len(_UpperCAmelCase ) for x in speech_inputs] __a : Tuple = feat_extract.model_input_names[0] __a : Optional[Any] = BatchFeature({input_name: speech_inputs} ) __a : Tuple = min(_UpperCAmelCase ) __a : str = feat_extract.num_mel_bins # hack! __a : Dict = feat_extract.pad( _UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _UpperCAmelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def _lowerCamelCase ( self , _UpperCAmelCase ): from datasets import load_dataset __a : Dict = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech __a : Tuple = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def _lowerCamelCase ( self ): # fmt: off __a : List[Any] = torch.tensor( [2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3, 3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3, 2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4, 4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3, 7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4, 4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] ) # fmt: on __a : Union[str, Any] = self._load_datasamples(1 ) __a : str = SpeechTaFeatureExtractor() __a : List[str] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 93680) ) self.assertTrue(torch.allclose(input_values[0, :30] , _UpperCAmelCase , atol=1e-6 ) ) def _lowerCamelCase ( self ): # fmt: off __a : Tuple = torch.tensor( [-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7, -3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6, -3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1, -3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] ) # fmt: on __a : Dict = self._load_datasamples(1 ) __a : Any = SpeechTaFeatureExtractor() __a : List[Any] = feature_extractor(audio_target=_UpperCAmelCase , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCAmelCase , atol=1e-4 ) )
52
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class a ( a__ ): snake_case__ = '''glpn''' def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = num_channels lowerCAmelCase = num_encoder_blocks lowerCAmelCase = depths lowerCAmelCase = sr_ratios lowerCAmelCase = hidden_sizes lowerCAmelCase = patch_sizes lowerCAmelCase = strides lowerCAmelCase = mlp_ratios lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = drop_path_rate lowerCAmelCase = layer_norm_eps lowerCAmelCase = decoder_hidden_size lowerCAmelCase = max_depth lowerCAmelCase = head_in_index
4
0
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_UpperCamelCase ) class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" a_ = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) a_ = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} ) a_ = Features( { """answers""": Sequence( { """text""": Value("""string""" ), """answer_start""": Value("""int32""" ), } ) } ) a_ = "question" a_ = "context" a_ = "answers" @property def lowercase ( self : Any ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
53
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = range_bbox def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase = bbox[i, j, 3] lowerCAmelCase = bbox[i, j, 1] lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase = bbox[i, j, 2] lowerCAmelCase = bbox[i, j, 0] lowerCAmelCase = t lowerCAmelCase = tf.convert_to_tensor(_snake_case ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = True snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def _SCREAMING_SNAKE_CASE (): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCAmelCase = outputs.loss lowerCAmelCase = (2,) self.assertEqual(loss.shape , _snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = (2, 2) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _snake_case ) self.assertEqual(outputs.end_logits.shape , _snake_case )
4
0
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def a__ ( ): '''simple docstring''' UpperCAmelCase_ =HfArgumentParser(lowercase__ ) UpperCAmelCase_ =parser.parse_args_into_dataclasses()[0] UpperCAmelCase_ =TensorFlowBenchmark(args=lowercase__ ) try: UpperCAmelCase_ =parser.parse_args_into_dataclasses()[0] except ValueError as e: UpperCAmelCase_ ="Arg --no_{0} is no longer used, please use --no-{0} instead." UpperCAmelCase_ =" ".join(str(lowercase__ ).split(" " )[:-1] ) UpperCAmelCase_ ="" UpperCAmelCase_ =eval(str(lowercase__ ).split(" " )[-1] ) UpperCAmelCase_ =[] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(lowercase__ ) if len(lowercase__ ) > 0: UpperCAmelCase_ =full_error_msg + begin_error_msg + str(lowercase__ ) raise ValueError(lowercase__ ) benchmark.run() if __name__ == "__main__": main()
54
"""simple docstring""" import argparse import os import re import packaging.version __UpperCamelCase : Union[str, Any] = '''examples/''' __UpperCamelCase : str = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __UpperCamelCase : List[str] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __UpperCamelCase : Optional[int] = '''README.md''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.read() lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern] lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase ) lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '🤗 Transformers currently provides the following architectures' lowerCAmelCase = '1. Want to contribute a new model?' with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.readlines() # Find the start of the list. lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowerCAmelCase = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): with open(REPLACE_FILES['init'] , 'r' ) as f: lowerCAmelCase = f.read() lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ): lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: lowerCAmelCase = default_version.base_version elif patch: lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = default_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = get_version() lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0' lowerCAmelCase = current_version.base_version # Check with the user we got that right. lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = dev_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __UpperCamelCase : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
4
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = ShapEPipeline snake_case_ = ["prompt"] snake_case_ = ["prompt"] snake_case_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] snake_case_ = False @property def UpperCamelCase_ ( self : List[Any] ): return 32 @property def UpperCamelCase_ ( self : Optional[int] ): return 32 @property def UpperCamelCase_ ( self : Tuple ): return self.time_input_dim * 4 @property def UpperCamelCase_ ( self : Union[str, Any] ): return 8 @property def UpperCamelCase_ ( self : Tuple ): __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def UpperCamelCase_ ( self : int ): torch.manual_seed(0 ) __A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) return CLIPTextModelWithProjection(A ) @property def UpperCamelCase_ ( self : List[Any] ): torch.manual_seed(0 ) __A = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __A = PriorTransformer(**A ) return model @property def UpperCamelCase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) __A = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __A = ShapERenderer(**A ) return model def UpperCamelCase_ ( self : List[str] ): __A = self.dummy_prior __A = self.dummy_text_encoder __A = self.dummy_tokenizer __A = self.dummy_renderer __A = HeunDiscreteScheduler( beta_schedule="exp" ,num_train_timesteps=10_24 ,prediction_type="sample" ,use_karras_sigmas=A ,clip_sample=A ,clip_sample_range=1.0 ,) __A = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def UpperCamelCase_ ( self : Dict ,A : Optional[Any] ,A : Any=0 ): if str(A ).startswith("mps" ): __A = torch.manual_seed(A ) else: __A = torch.Generator(device=A ).manual_seed(A ) __A = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def UpperCamelCase_ ( self : List[str] ): __A = "cpu" __A = self.get_dummy_components() __A = self.pipeline_class(**A ) __A = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) __A = pipe(**self.get_dummy_inputs(A ) ) __A = output.images[0] __A = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __A = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self : Optional[Any] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase_ ( self : int ): __A = torch_device == "cpu" __A = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=A ,relax_max_difference=A ,) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.get_dummy_components() __A = self.pipeline_class(**A ) __A = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) __A = 1 __A = 2 __A = self.get_dummy_inputs(A ) for key in inputs.keys(): if key in self.batch_params: __A = batch_size * [inputs[key]] __A = pipe(**A ,num_images_per_prompt=A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Optional[int] ): __A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy" ) __A = ShapEPipeline.from_pretrained("openai/shap-e" ) __A = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) __A = torch.Generator(device=A ).manual_seed(0 ) __A = pipe( "a shark" ,generator=A ,guidance_scale=15.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="np" ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(A ,A )
55
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __UpperCamelCase : Optional[int] = pytest.mark.integration @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} ) return dset def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() lowerCAmelCase = dset.map( lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case ) lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch lowerCAmelCase = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} lowerCAmelCase = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_snake_case ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1] lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) self.assertRaises(_snake_case , index.search_batch , queries[0] ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCAmelCase = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_snake_case ): lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = faiss.IndexFlat(5 ) lowerCAmelCase = FaissIndex(custom_index=_snake_case ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: index.save(tmp_file.name ) lowerCAmelCase = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowerCAmelCase = 'index.faiss' lowerCAmelCase = F'mock://{index_name}' index.save(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = Elasticsearch() lowerCAmelCase = {'acknowledged': True} lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case ) # batched queries with timeout lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case )
4
0
'''simple docstring''' def _a (lowercase__ : int , lowercase__ : int ) -> float: """simple docstring""" return base * power(lowercase__ , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print("Raise base to the power of exponent using recursion...") _a : Union[str, Any] = int(input("Enter the base: ").strip()) _a : Any = int(input("Enter the exponent: ").strip()) _a : List[str] = power(base, abs(exponent)) if exponent < 0: # power() does not properly deal w/ negative exponents _a : List[Any] = 1 / result print(f'''{base} to the power of {exponent} is {result}''')
56
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a ( a__ , a__ , unittest.TestCase ): snake_case__ = IFInpaintingPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_dummy_components() def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCamelCase__ ( self ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
4
0
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex A_ : str = logging.getLogger(__name__) class _lowerCAmelCase: """simple docstring""" def __init__( self ): UpperCamelCase_: Optional[int] = False def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if not self.initialized: UpperCamelCase_: Optional[Any] = RagRetriever( _lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , index=_lowerCamelCase , init_retrieval=_lowerCamelCase , ) UpperCamelCase_: str = True def _a ( self ): self.retriever.index.init_index() def _a ( self , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_ ,UpperCamelCase_: Any = self.retriever._main_retrieve(_lowerCamelCase , _lowerCamelCase ) return doc_ids, retrieved_doc_embeds class _lowerCAmelCase( UpperCAmelCase_ ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): if index is not None and index.is_initialized() and len(_lowerCamelCase ) > 0: raise ValueError( 'When using Ray for distributed fine-tuning, ' 'you\'ll need to provide the paths instead, ' 'as the dataset and the index are loaded ' 'separately. More info in examples/rag/use_own_knowledge_dataset.py ' ) super().__init__( _lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , index=_lowerCamelCase , init_retrieval=_lowerCamelCase , ) UpperCamelCase_: List[str] = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for worker in self.retrieval_workers ] ) def _a ( self ): logger.info('initializing retrieval' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _a ( self , _lowerCamelCase , _lowerCamelCase ): if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. UpperCamelCase_: Union[str, Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] UpperCamelCase_ ,UpperCamelCase_: str = ray.get(random_worker.retrieve.remote(_lowerCamelCase , _lowerCamelCase ) ) else: UpperCamelCase_ ,UpperCamelCase_: Dict = self._main_retrieve(_lowerCamelCase , _lowerCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCamelCase ) @classmethod def _a ( cls , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): return super(_lowerCamelCase , cls ).get_tokenizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) @classmethod def _a ( cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): UpperCamelCase_: List[str] = kwargs.pop('config' , _lowerCamelCase ) or RagConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) UpperCamelCase_: Optional[int] = RagTokenizer.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) UpperCamelCase_: List[str] = rag_tokenizer.question_encoder UpperCamelCase_: List[Any] = rag_tokenizer.generator if indexed_dataset is not None: UpperCamelCase_: Union[str, Any] = 'custom' UpperCamelCase_: int = CustomHFIndex(config.retrieval_vector_size , _lowerCamelCase ) else: UpperCamelCase_: str = cls._build_index(_lowerCamelCase ) return cls( _lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , retrieval_workers=_lowerCamelCase , index=_lowerCamelCase , )
57
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = self.vocab_size - 1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_snake_case ) return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @require_torch class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(_snake_case ) lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=_snake_case ) # the president is lowerCAmelCase = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case ) self.assertListEqual(output_ids[0].tolist() , _snake_case )
4
0
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowercase = "cpu" , _lowercase = "openai/clip-vit-large-patch14" ) -> None: '''simple docstring''' snake_case_ : Optional[Any] = device snake_case_ : List[Any] = CLIPTokenizerFast.from_pretrained(_lowercase ) snake_case_ : Optional[int] = [0.4814_5466, 0.457_8275, 0.4082_1073] snake_case_ : Union[str, Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711] snake_case_ : str = torchvision.transforms.Normalize(self.image_mean , self.image_std ) snake_case_ : Any = torchvision.transforms.Resize(2_2_4 ) snake_case_ : str = torchvision.transforms.CenterCrop(2_2_4 ) def UpperCAmelCase__ ( self , _lowercase ) -> Dict: '''simple docstring''' snake_case_ : int = self.resize(_lowercase ) snake_case_ : int = self.center_crop(_lowercase ) snake_case_ : Union[str, Any] = self.normalize(_lowercase ) return images def __call__( self , _lowercase=None , _lowercase=None , **_lowercase ) -> Tuple: '''simple docstring''' snake_case_ : Dict = self.tokenizer(text=_lowercase , **_lowercase ) snake_case_ : Optional[int] = self.preprocess_img(_lowercase ) snake_case_ : Optional[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase=1_0 , _lowercase=0.01 , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=True , _lowercase="image" , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , ) -> None: '''simple docstring''' super().__init__() snake_case_ : List[Any] = None snake_case_ : List[str] = device if device else get_device() if vqgan: snake_case_ : str = vqgan else: snake_case_ : Any = load_vqgan(self.device , conf_path=_lowercase , ckpt_path=_lowercase ) self.vqgan.eval() if clip: snake_case_ : int = clip else: snake_case_ : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) snake_case_ : Tuple = ProcessorGradientFlow(device=self.device ) snake_case_ : int = iterations snake_case_ : str = lr snake_case_ : int = log snake_case_ : List[Any] = make_grid snake_case_ : Tuple = return_val snake_case_ : Any = quantize snake_case_ : Optional[int] = self.vqgan.decoder.z_shape def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=5 , _lowercase=True ) -> int: '''simple docstring''' snake_case_ : Tuple = [] if output_path is None: snake_case_ : List[Any] = """./animation.gif""" if input_path is None: snake_case_ : Dict = self.save_path snake_case_ : Optional[Any] = sorted(glob(input_path + """/*""" ) ) if not len(_lowercase ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(_lowercase ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) snake_case_ : List[Any] = total_duration / len(_lowercase ) snake_case_ : List[str] = [frame_duration] * len(_lowercase ) if extend_frames: snake_case_ : Dict = 1.5 snake_case_ : Union[str, Any] = 3 for file_name in paths: if file_name.endswith(""".png""" ): images.append(imageio.imread(_lowercase ) ) imageio.mimsave(_lowercase , _lowercase , duration=_lowercase ) print(f'gif saved to {output_path}' ) def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None ) -> Tuple: '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError snake_case_ : Any = preprocess(Image.open(_lowercase ) , target_image_size=2_5_6 ).to(self.device ) snake_case_ : Dict = preprocess_vqgan(_lowercase ) snake_case_ , *snake_case_ : str = self.vqgan.encode(_lowercase ) return z def UpperCAmelCase__ ( self , _lowercase ) -> List[str]: '''simple docstring''' snake_case_ : List[Any] = self.latent.detach().requires_grad_() snake_case_ : Union[str, Any] = base_latent + transform_vector if self.quantize: snake_case_ , *snake_case_ : Optional[int] = self.vqgan.quantize(_lowercase ) else: snake_case_ : List[str] = trans_latent return self.vqgan.decode(_lowercase ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=None ) -> int: '''simple docstring''' snake_case_ : Tuple = self.clip_preprocessor(text=_lowercase , images=_lowercase , return_tensors="""pt""" , padding=_lowercase ) snake_case_ : Union[str, Any] = self.clip(**_lowercase ) snake_case_ : List[Any] = clip_outputs.logits_per_image if weights is not None: snake_case_ : int = similarity_logits * weights return similarity_logits.sum() def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""] , _lowercase , weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: snake_case_ : Tuple = self._get_clip_similarity(neg_prompts["""prompts"""] , _lowercase , weights=neg_prompts["""weights"""] ) else: snake_case_ : Any = torch.tensor([1] , device=self.device ) snake_case_ : Any = -torch.log(_lowercase ) + torch.log(_lowercase ) return loss def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[str]: '''simple docstring''' snake_case_ : List[str] = torch.randn_like(self.latent , requires_grad=_lowercase , device=self.device ) snake_case_ : Dict = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() snake_case_ : List[str] = self._add_vector(_lowercase ) snake_case_ : Union[str, Any] = loop_post_process(_lowercase ) snake_case_ : int = self._get_CLIP_loss(_lowercase , _lowercase , _lowercase ) print("""CLIP loss""" , _lowercase ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=_lowercase ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[Any]: '''simple docstring''' wandb.init(reinit=_lowercase , project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: snake_case_ : Tuple = Image.open(_lowercase ) snake_case_ : str = image.resize((2_5_6, 2_5_6) ) wandb.log("""Original Image""" , wandb.Image(_lowercase ) ) def UpperCAmelCase__ ( self , _lowercase ) -> Any: '''simple docstring''' if not prompts: return [] snake_case_ : List[str] = [] snake_case_ : str = [] if isinstance(_lowercase , _lowercase ): snake_case_ : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(_lowercase , (tuple, list) ): snake_case_ : Union[str, Any] = prompt[0] snake_case_ : Dict = float(prompt[1] ) elif ":" in prompt: snake_case_ , snake_case_ : Tuple = prompt.split(""":""" ) snake_case_ : Optional[Any] = float(_lowercase ) else: snake_case_ : Dict = prompt snake_case_ : Optional[Any] = 1.0 processed_prompts.append(_lowercase ) weights.append(_lowercase ) return { "prompts": processed_prompts, "weights": torch.tensor(_lowercase , device=self.device ), } def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if image_path: snake_case_ : Tuple = self._get_latent(_lowercase ) else: snake_case_ : Dict = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(_lowercase , _lowercase , _lowercase ) assert pos_prompts, "You must provide at least one positive prompt." snake_case_ : Tuple = self.process_prompts(_lowercase ) snake_case_ : int = self.process_prompts(_lowercase ) if save_final and save_path is None: snake_case_ : Optional[Any] = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(_lowercase ): os.makedirs(_lowercase ) else: snake_case_ : List[str] = save_path + """_""" + get_timestamp() os.makedirs(_lowercase ) snake_case_ : List[str] = save_path snake_case_ : Dict = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(_lowercase ) ) snake_case_ : Tuple = loop_post_process(_lowercase ) for iter, transformed_img in enumerate(self._optimize_CLIP(_lowercase , _lowercase , _lowercase ) ): if show_intermediate: show_pil(_lowercase ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) ) if self.log: wandb.log({"""Image""": wandb.Image(_lowercase )} ) if show_final: show_pil(_lowercase ) if save_final: transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
58
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __UpperCamelCase : str = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' ) lowerCAmelCase = parser.parse_args() logger.info(F'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(F'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: lowerCAmelCase = fp.readlines() logger.info('Start encoding' ) logger.info(F'{len(_UpperCAmelCase )} examples to process.' ) lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 1_0000 lowerCAmelCase = time.time() for text in data: lowerCAmelCase = F'{bos} {text.strip()} {sep}' lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) rslt.append(_UpperCAmelCase ) iter += 1 if iter % interval == 0: lowerCAmelCase = time.time() logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) lowerCAmelCase = time.time() logger.info('Finished binarization' ) logger.info(F'{len(_UpperCAmelCase )} examples processed.' ) lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle' lowerCAmelCase = tokenizer.vocab_size if vocab_size < (1 << 16): lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt] else: lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'Dump to {dp_file}' ) with open(_UpperCAmelCase , 'wb' ) as handle: pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
4
0
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __A = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] __A = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowerCAmelCase_ ( ) -> int: """simple docstring""" lowerCamelCase__: Any =calculate_rouge(__a , __a , bootstrap_aggregation=__a , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(__a , __a ) lowerCamelCase__: List[Any] =calculate_rouge(__a , __a , bootstrap_aggregation=__a , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def lowerCAmelCase_ ( ) -> List[str]: """simple docstring""" lowerCamelCase__: Optional[int] ="rougeLsum" lowerCamelCase__: Tuple =calculate_rouge(__a , __a , newline_sep=__a , rouge_keys=[k] )[k] lowerCamelCase__: Optional[Any] =calculate_rouge(__a , __a , newline_sep=__a , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: List[Any] =["rouge1", "rouge2", "rougeL"] lowerCamelCase__: int =calculate_rouge(__a , __a , newline_sep=__a , rouge_keys=__a ) lowerCamelCase__: List[str] =calculate_rouge(__a , __a , newline_sep=__a , rouge_keys=__a ) assert score_sep == score_no_sep def lowerCAmelCase_ ( ) -> Dict: """simple docstring""" lowerCamelCase__: Dict =[ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] lowerCamelCase__: int =[ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(__a , __a , newline_sep=__a ) == calculate_rouge(__a , __a , newline_sep=__a ) def lowerCAmelCase_ ( ) -> Tuple: """simple docstring""" lowerCamelCase__: List[str] =[ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] lowerCamelCase__: List[Any] =[ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] lowerCamelCase__: Dict =calculate_rouge(__a , __a , rouge_keys=["rougeLsum"] , newline_sep=__a )["rougeLsum"] lowerCamelCase__: Tuple =calculate_rouge(__a , __a , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def lowerCAmelCase_ ( ) -> Dict: """simple docstring""" lowerCamelCase__: Optional[Any] =Path("examples/seq2seq/test_data/wmt_en_ro" ) lowerCamelCase__: Union[str, Any] =calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(__a , __a ) lowerCamelCase__: Dict =calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=__a ) assert isinstance(__a , __a )
59
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''bert''' def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = classifier_dropout class a ( a__ ): @property def UpperCamelCase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
4
0
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowerCAmelCase_ = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) lowerCAmelCase_ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} lowerCAmelCase_ = '''zero2''' lowerCAmelCase_ = '''zero3''' lowerCAmelCase_ = [ZEROa, ZEROa] def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case_ : Tuple = parameterized.to_safe_name('''_'''.join(str(_UpperCamelCase ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test lowerCAmelCase_ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __lowerCAmelCase ( _a ): @parameterized.expand(__magic_name__ , name_func=__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' self.run_and_check( stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , ) @require_torch_multi_gpu @parameterized.expand(__magic_name__ , name_func=__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int: '''simple docstring''' self.run_and_check( stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , ) @parameterized.expand(__magic_name__ , name_func=__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> List[Any]: '''simple docstring''' self.run_and_check( stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , ) @require_torch_multi_gpu @parameterized.expand(__magic_name__ , name_func=__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' self.run_and_check( stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' pass def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = 10 , __magic_name__ = True , __magic_name__ = True , __magic_name__ = True , ) -> Any: '''simple docstring''' snake_case_ : List[Any] = models[model] snake_case_ : Optional[int] = self.run_trainer( stage=__magic_name__ , model_name=__magic_name__ , eval_steps=__magic_name__ , num_train_epochs=1 , distributed=__magic_name__ , fpaa=__magic_name__ , ) self.do_checks(__magic_name__ ) return output_dir def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = 10 , __magic_name__ = 1 , __magic_name__ = True , __magic_name__ = True , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_auto_remove_tmp_dir('''./xxx''' , after=__magic_name__ ) snake_case_ : Optional[Any] = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__magic_name__ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files snake_case_ : Tuple = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() snake_case_ : Dict = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] snake_case_ : Optional[int] = self.get_launcher(__magic_name__ ) snake_case_ : List[Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__magic_name__ , env=self.get_env() ) return output_dir def lowerCamelCase (self , __magic_name__=False ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
60
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( a__ , unittest.TestCase ): snake_case__ = DanceDiffusionPipeline snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) lowerCAmelCase = IPNDMScheduler() lowerCAmelCase = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = DanceDiffusionPipeline(**_snake_case ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe(**_snake_case ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_local() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_optional_components() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_attention_slicing_forward_pass() def UpperCamelCase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
4
0
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter UpperCamelCase = True except ImportError: UpperCamelCase = False UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name def _A ( lowerCAmelCase_ : Namespace ): """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" @staticmethod def a ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> int: lowerCAmelCase__ = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" , type=SCREAMING_SNAKE_CASE__ , help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" , type=SCREAMING_SNAKE_CASE__ , help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ ) def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , *SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: lowerCAmelCase__ = testing lowerCAmelCase__ = testing_file lowerCAmelCase__ = path def a ( self : Union[str, Any] ) -> Tuple: warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowerCAmelCase__ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(SCREAMING_SNAKE_CASE__ ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." ) lowerCAmelCase__ = ( Path(SCREAMING_SNAKE_CASE__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) lowerCAmelCase__ = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(SCREAMING_SNAKE_CASE__ ) ) else: with open(self._testing_file , "r" ) as configuration_file: lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE__ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=SCREAMING_SNAKE_CASE__ , extra_context=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" , "r" ) as configuration_file: lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = configuration["lowercase_modelname"] lowerCAmelCase__ = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(f'{directory}/configuration.json' ) lowerCAmelCase__ = "PyTorch" in generate_tensorflow_pytorch_and_flax lowerCAmelCase__ = "TensorFlow" in generate_tensorflow_pytorch_and_flax lowerCAmelCase__ = "Flax" in generate_tensorflow_pytorch_and_flax lowerCAmelCase__ = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=SCREAMING_SNAKE_CASE__ ) # Tests require submodules as they have parent imports with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , "w" ): pass shutil.move( f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , ) shutil.move( f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , ) def remove_copy_lines(SCREAMING_SNAKE_CASE__ : Dict ): with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: lowerCAmelCase__ = f.readlines() with open(SCREAMING_SNAKE_CASE__ , "w" ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(SCREAMING_SNAKE_CASE__ ) if output_pytorch: if not self._testing: remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , ) shutil.move( f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , ) else: os.remove(f'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , ) shutil.move( f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , ) else: os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , ) shutil.move( f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , ) else: os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , ) shutil.move( f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , ) shutil.move( f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ): # Create temp file lowerCAmelCase__ , lowerCAmelCase__ = mkstemp() lowerCAmelCase__ = False with fdopen(SCREAMING_SNAKE_CASE__ , "w" ) as new_file: with open(SCREAMING_SNAKE_CASE__ ) as old_file: for line in old_file: new_file.write(SCREAMING_SNAKE_CASE__ ) if line_to_copy_below in line: lowerCAmelCase__ = True for line_to_copy in lines_to_copy: new_file.write(SCREAMING_SNAKE_CASE__ ) if not line_found: raise ValueError(f'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Remove original file remove(SCREAMING_SNAKE_CASE__ ) # Move new file move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def skip_units(SCREAMING_SNAKE_CASE__ : List[Any] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(SCREAMING_SNAKE_CASE__ : List[Any] ): with open(SCREAMING_SNAKE_CASE__ ) as datafile: lowerCAmelCase__ = [] lowerCAmelCase__ = False lowerCAmelCase__ = False for line in datafile: if "# To replace in: " in line and "##" not in line: lowerCAmelCase__ = line.split("\"" )[1] lowerCAmelCase__ = skip_units(SCREAMING_SNAKE_CASE__ ) elif "# Below: " in line and "##" not in line: lowerCAmelCase__ = line.split("\"" )[1] lowerCAmelCase__ = skip_units(SCREAMING_SNAKE_CASE__ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = [] elif "# Replace with" in line and "##" not in line: lowerCAmelCase__ = [] elif "##" not in line: lines_to_copy.append(SCREAMING_SNAKE_CASE__ ) remove(SCREAMING_SNAKE_CASE__ ) replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(SCREAMING_SNAKE_CASE__ )
61
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , ) lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenLlamaModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'single_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'multi_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowerCAmelCase = original_model(_snake_case ).last_hidden_state lowerCAmelCase = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
4
0
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors snake_case = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = '''sequence-classification''' def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple ): if type(UpperCAmelCase_ ) == dict: SCREAMING_SNAKE_CASE : List[str] = Namespace(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = glue_output_modes[hparams.task] SCREAMING_SNAKE_CASE : Union[str, Any] = glue_tasks_num_labels[hparams.task] super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , self.mode ) def _A ( self : List[str] , **UpperCAmelCase_ : str ): return self.model(**UpperCAmelCase_ ) def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ): SCREAMING_SNAKE_CASE : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: SCREAMING_SNAKE_CASE : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None SCREAMING_SNAKE_CASE : Union[str, Any] = self(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = outputs[0] SCREAMING_SNAKE_CASE : List[Any] = self.trainer.lr_schedulers[0]["scheduler"] SCREAMING_SNAKE_CASE : List[str] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[Any] = self.hparams SCREAMING_SNAKE_CASE : Tuple = processors[args.task]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: SCREAMING_SNAKE_CASE : str = self._feature_file(UpperCAmelCase_ ) if os.path.exists(UpperCAmelCase_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCAmelCase_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) SCREAMING_SNAKE_CASE : Union[str, Any] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) SCREAMING_SNAKE_CASE : List[str] = convert_examples_to_features( UpperCAmelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCAmelCase_ ) torch.save(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ): SCREAMING_SNAKE_CASE : Dict = "dev" if mode == "test" else mode SCREAMING_SNAKE_CASE : List[Any] = self._feature_file(UpperCAmelCase_ ) logger.info("Loading features from cached file %s" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = torch.load(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) SCREAMING_SNAKE_CASE : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , batch_size=UpperCAmelCase_ , shuffle=UpperCAmelCase_ , ) def _A ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: SCREAMING_SNAKE_CASE : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None SCREAMING_SNAKE_CASE : str = self(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = outputs[:2] SCREAMING_SNAKE_CASE : Union[str, Any] = logits.detach().cpu().numpy() SCREAMING_SNAKE_CASE : Optional[int] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _A ( self : List[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() SCREAMING_SNAKE_CASE : Tuple = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": SCREAMING_SNAKE_CASE : List[str] = np.argmax(UpperCAmelCase_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": SCREAMING_SNAKE_CASE : str = np.squeeze(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) SCREAMING_SNAKE_CASE : Dict = [[] for _ in range(out_label_ids.shape[0] )] SCREAMING_SNAKE_CASE : List[Any] = [[] for _ in range(out_label_ids.shape[0] )] SCREAMING_SNAKE_CASE : Any = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCAmelCase_ , UpperCAmelCase_ )} SCREAMING_SNAKE_CASE : Union[str, Any] = dict(results.items() ) SCREAMING_SNAKE_CASE : Optional[Any] = results return ret, preds_list, out_label_list def _A ( self : Union[str, Any] , UpperCAmelCase_ : list ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self._eval_end(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _A ( self : List[str] , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._eval_end(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _A ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ): BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCAmelCase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCAmelCase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser() add_generic_args(lowercase , os.getcwd() ) SCREAMING_SNAKE_CASE : int = GLUETransformer.add_model_specific_args(lowercase , os.getcwd() ) SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: SCREAMING_SNAKE_CASE : List[str] = os.path.join( "./results" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , ) os.makedirs(args.output_dir ) SCREAMING_SNAKE_CASE : Union[str, Any] = GLUETransformer(lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = generic_train(lowercase , lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=lowercase ) ) SCREAMING_SNAKE_CASE : Dict = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(lowercase ) if __name__ == "__main__": main()
62
"""simple docstring""" from typing import Any class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = data lowerCAmelCase = None def __repr__( self ): """simple docstring""" return F'Node({self.data})' class a : def __init__( self ): """simple docstring""" lowerCAmelCase = None def __iter__( self ): """simple docstring""" lowerCAmelCase = self.head while node: yield node.data lowerCAmelCase = node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(_snake_case ) for item in self] ) def __getitem__( self , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) lowerCAmelCase = self.head for _ in range(_snake_case ): lowerCAmelCase = current.next lowerCAmelCase = data def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(len(self ) , _snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(0 , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) lowerCAmelCase = Node(_snake_case ) if self.head is None: lowerCAmelCase = new_node elif index == 0: lowerCAmelCase = self.head # link new_node to head lowerCAmelCase = new_node else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = new_node def UpperCamelCase__ ( self ): # print every node data """simple docstring""" print(self ) def UpperCamelCase__ ( self ): """simple docstring""" return self.delete_nth(0 ) def UpperCamelCase__ ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def UpperCamelCase__ ( self , _snake_case = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) lowerCAmelCase = self.head # default first node if index == 0: lowerCAmelCase = self.head.next else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = temp.next.next return delete_node.data def UpperCamelCase__ ( self ): """simple docstring""" return self.head is None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = None lowerCAmelCase = self.head while current: # Store the current node's next node. lowerCAmelCase = current.next # Make the current node's next point backwards lowerCAmelCase = prev # Make the previous node be the current node lowerCAmelCase = current # Make the current node the next node (to progress iteration) lowerCAmelCase = next_node # Return prev in order to put the head at the end lowerCAmelCase = prev def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = LinkedList() assert linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_UpperCAmelCase ) == i linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_UpperCAmelCase ) == 9 assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [ -9, 100, Node(7734_5112 ), 'dlrow olleH', 7, 5555, 0, -192.5_5555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] lowerCAmelCase = LinkedList() for i in test_input: linked_list.insert_tail(_UpperCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase = linked_list.delete_head() assert result == -9 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' ) ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_UpperCAmelCase ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_UpperCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _SCREAMING_SNAKE_CASE (): from doctest import testmod testmod() lowerCAmelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_UpperCAmelCase ) print('\nReading/changing Node data using indexing:' ) print(F'Element at Position 1: {linked_list[1]}' ) lowerCAmelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_UpperCAmelCase ) print(F'length of linked_list is : {len(_UpperCAmelCase )}' ) if __name__ == "__main__": main()
4
0
from ...configuration_utils import PretrainedConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = { "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json", } class a ( lowercase__ ): """simple docstring""" a : Union[str, Any] = 'timesformer' def __init__( self : List[Any] , __lowercase : Optional[Any]=224 , __lowercase : List[Any]=16 , __lowercase : int=3 , __lowercase : Tuple=8 , __lowercase : str=768 , __lowercase : List[Any]=12 , __lowercase : List[Any]=12 , __lowercase : Dict=3072 , __lowercase : Dict="gelu" , __lowercase : Dict=0.0 , __lowercase : Dict=0.0 , __lowercase : int=0.02 , __lowercase : List[str]=1e-6 , __lowercase : Tuple=True , __lowercase : Tuple="divided_space_time" , __lowercase : Any=0 , **__lowercase : List[Any] , ) -> Tuple: super().__init__(**__lowercase ) __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : Dict = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : str = num_frames __UpperCAmelCase : Optional[int] = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Dict = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Any = layer_norm_eps __UpperCAmelCase : List[Any] = qkv_bias __UpperCAmelCase : Union[str, Any] = attention_type __UpperCAmelCase : Dict = drop_path_rate
63
"""simple docstring""" from __future__ import annotations import requests def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(_UpperCAmelCase ).json() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories] return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase ) return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
4
0
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging lowercase_ : Union[str, Any] = logging.get_logger(__name__) logging.set_verbosity_info() def A__ ( snake_case_ : str , snake_case_ : str ): if "xprophetnet" in prophetnet_checkpoint_path: SCREAMING_SNAKE_CASE__: Dict= XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case_ ) SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= XLMProphetNetForConditionalGeneration.from_pretrained( snake_case_ , output_loading_info=snake_case_ ) else: SCREAMING_SNAKE_CASE__: int= ProphetNetForConditionalGenerationOld.from_pretrained(snake_case_ ) SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= ProphetNetForConditionalGeneration.from_pretrained( snake_case_ , output_loading_info=snake_case_ ) SCREAMING_SNAKE_CASE__: Tuple= ['''key_proj''', '''value_proj''', '''query_proj'''] SCREAMING_SNAKE_CASE__: Optional[int]= { '''self_attn''': '''ngram_self_attn''', '''cross_attn''': '''encoder_attn''', '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''', '''feed_forward_layer_norm''': '''final_layer_norm''', '''feed_forward''': '''''', '''intermediate''': '''fc1''', '''output''': '''fc2''', '''key_proj''': '''k_proj''', '''query_proj''': '''q_proj''', '''value_proj''': '''v_proj''', '''word_embeddings''': '''embed_tokens''', '''embeddings_layer_norm''': '''emb_layer_norm''', '''relative_pos_embeddings''': '''relative_linear''', '''ngram_embeddings''': '''ngram_input_embed''', '''position_embeddings''': '''embed_positions''', } for key in loading_info["missing_keys"]: SCREAMING_SNAKE_CASE__: Optional[int]= key.split('''.''' ) if attributes[0] == "lm_head": SCREAMING_SNAKE_CASE__: List[str]= prophet SCREAMING_SNAKE_CASE__: List[Any]= prophet_old else: SCREAMING_SNAKE_CASE__: List[Any]= prophet.prophetnet SCREAMING_SNAKE_CASE__: Dict= prophet_old.model SCREAMING_SNAKE_CASE__: Tuple= False for attribute in attributes: if attribute in mapping: SCREAMING_SNAKE_CASE__: Optional[Any]= mapping[attribute] if not hasattr(snake_case_ , snake_case_ ) and len(snake_case_ ) > 0: SCREAMING_SNAKE_CASE__: Optional[int]= attribute elif hasattr(snake_case_ , snake_case_ ): SCREAMING_SNAKE_CASE__: Optional[int]= attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" SCREAMING_SNAKE_CASE__: Tuple= old_model.weight logger.info(F'{attribute} is initialized.' ) SCREAMING_SNAKE_CASE__: Optional[Any]= True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" SCREAMING_SNAKE_CASE__: Dict= old_model.bias logger.info(F'{attribute} is initialized' ) SCREAMING_SNAKE_CASE__: Tuple= True break elif attribute in special_keys and hasattr(snake_case_ , '''in_proj_weight''' ): SCREAMING_SNAKE_CASE__: Any= old_model.in_proj_weight.shape[0] // 3 SCREAMING_SNAKE_CASE__: List[str]= getattr(snake_case_ , snake_case_ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": SCREAMING_SNAKE_CASE__: Any= nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) SCREAMING_SNAKE_CASE__: Optional[Any]= nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": SCREAMING_SNAKE_CASE__: List[str]= nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) SCREAMING_SNAKE_CASE__: Dict= nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": SCREAMING_SNAKE_CASE__: Union[str, Any]= nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) SCREAMING_SNAKE_CASE__: str= nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) SCREAMING_SNAKE_CASE__: Union[str, Any]= True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." SCREAMING_SNAKE_CASE__: Dict= nn.Parameter(old_model.embed_positions.weight[:512, :] ) SCREAMING_SNAKE_CASE__: Any= True break if attribute.isdigit(): SCREAMING_SNAKE_CASE__: Tuple= model[int(snake_case_ )] SCREAMING_SNAKE_CASE__: Tuple= old_model[int(snake_case_ )] else: SCREAMING_SNAKE_CASE__: List[str]= getattr(snake_case_ , snake_case_ ) if old_attribute == "": SCREAMING_SNAKE_CASE__: List[Any]= old_model else: if not hasattr(snake_case_ , snake_case_ ): raise ValueError(F'{old_model} does not have {old_attribute}' ) SCREAMING_SNAKE_CASE__: int= getattr(snake_case_ , snake_case_ ) if not is_key_init: raise ValueError(F'{key} was not correctly initialized!' ) print(F'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(snake_case_ ) if __name__ == "__main__": lowercase_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowercase_ : str = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
64
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 48 lowerCAmelCase = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 60 lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = 126 lowerCAmelCase = 7 lowerCAmelCase = 255.0 lowerCAmelCase = '' return config def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ): if "patch_embed.proj" in name and "layers" not in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: lowerCAmelCase = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCAmelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCAmelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCAmelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: lowerCAmelCase = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: lowerCAmelCase = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: lowerCAmelCase = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": lowerCAmelCase = 'layernorm.weight' if name == "norm.bias": lowerCAmelCase = 'layernorm.bias' if "conv_first" in name: lowerCAmelCase = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCAmelCase = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' ) lowerCAmelCase = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: lowerCAmelCase = 'swin2sr.' + name return name def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ): for key in orig_state_dict.copy().keys(): lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: lowerCAmelCase = key.split('.' ) lowerCAmelCase = int(key_split[1] ) lowerCAmelCase = int(key_split[4] ) lowerCAmelCase = config.embed_dim if "weight" in key: lowerCAmelCase = val[:dim, :] lowerCAmelCase = val[dim : dim * 2, :] lowerCAmelCase = val[-dim:, :] else: lowerCAmelCase = val[:dim] lowerCAmelCase = val[dim : dim * 2] lowerCAmelCase = val[-dim:] pass else: lowerCAmelCase = val return orig_state_dict def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ): lowerCAmelCase = get_config(_UpperCAmelCase ) lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase ) model.eval() lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' ) lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) lowerCAmelCase = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256 lowerCAmelCase = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCAmelCase = model(_UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 ) print('Looks ok!' ) lowerCAmelCase = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } lowerCAmelCase = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') __UpperCamelCase : Optional[int] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaControlnetPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def __lowercase ( self : int ): '''simple docstring''' return 32 @property def __lowercase ( self : Dict ): '''simple docstring''' return self.time_input_dim @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowercase ( self : Any ): '''simple docstring''' return 100 @property def __lowercase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ : int = UNetaDConditionModel(**A ) return model @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowercase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.dummy_unet UpperCAmelCase__ : List[Any] = self.dummy_movq UpperCAmelCase__ : List[Any] = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule="""linear""" ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=A ,set_alpha_to_one=A ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=A ,) UpperCAmelCase__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : str ,A : Optional[Any] ,A : Any=0 ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( A ) # create hint UpperCAmelCase__ : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase__ : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase__ : Dict = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase__ : Dict = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = """cpu""" UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**A ) UpperCAmelCase__ : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(A ) ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : Dict = pipe( **self.get_dummy_inputs(A ) ,return_dict=A ,)[0] UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = np.array( [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) UpperCAmelCase__ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) UpperCAmelCase__ : int = torch.from_numpy(np.array(A ) ).float() / 2_5_5.0 UpperCAmelCase__ : Union[str, Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(A ) UpperCAmelCase__ : List[Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipeline.to(A ) pipeline.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[Any] = """A robot, 4k photo""" UpperCAmelCase__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior( A ,generator=A ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() UpperCAmelCase__ : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ : int = pipeline( image_embeds=A ,negative_image_embeds=A ,hint=A ,generator=A ,num_inference_steps=100 ,output_type="""np""" ,) UpperCAmelCase__ : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A ,A )
65
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''megatron-bert''' def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache
4
0
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") UpperCamelCase = f'''https://www.google.com/search?q={query}&num=100''' UpperCamelCase = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: UpperCamelCase = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: UpperCamelCase = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
66
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
4
0
from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict , snake_case__ :int , snake_case__ :List[str] , snake_case__ :int ) -> int: # noqa: E741 while r - l > 1: _lowercase = (l + r) // 2 if v[m] >= key: _lowercase = m else: _lowercase = m # noqa: E741 return r def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[int] ) -> int: if len(snake_case__ ) == 0: return 0 _lowercase = [0] * len(snake_case__ ) _lowercase = 1 _lowercase = v[0] for i in range(1 , len(snake_case__ ) ): if v[i] < tail[0]: _lowercase = v[i] elif v[i] > tail[length - 1]: _lowercase = v[i] length += 1 else: _lowercase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
67
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a ( a__ ): snake_case__ = 42 class a ( a__ , a__ ): @register_to_config def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder lowerCAmelCase = Encoder( in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , ) lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case ) lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) # pass init params to Decoder lowerCAmelCase = Decoder( in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = self.encoder(_snake_case ) lowerCAmelCase = self.quant_conv(_snake_case ) if not return_dict: return (h,) return VQEncoderOutput(latents=_snake_case ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ): """simple docstring""" if not force_not_quantize: lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case ) else: lowerCAmelCase = h lowerCAmelCase = self.post_quant_conv(_snake_case ) lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = sample lowerCAmelCase = self.encode(_snake_case ).latents lowerCAmelCase = self.decode(_snake_case ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case )
4
0
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput __A = "scheduler_config.json" class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : str = 1 lowerCamelCase : int = 2 lowerCamelCase : Any = 3 lowerCamelCase : Tuple = 4 lowerCamelCase : Dict = 5 lowerCamelCase : Optional[int] = 6 lowerCamelCase : Optional[Any] = 7 lowerCamelCase : Union[str, Any] = 8 lowerCamelCase : str = 9 lowerCamelCase : Union[str, Any] = 10 lowerCamelCase : Tuple = 11 lowerCamelCase : Dict = 12 lowerCamelCase : int = 13 lowerCamelCase : List[Any] = 14 @dataclass class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : torch.FloatTensor class _A : """simple docstring""" lowerCamelCase : Optional[Any] = SCHEDULER_CONFIG_NAME lowerCamelCase : Dict = [] lowerCamelCase : Any = True @classmethod def _a ( cls : Any , __SCREAMING_SNAKE_CASE : Dict[str, Any] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Dict=False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Any: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =cls.load_config( pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE , return_unused_kwargs=__SCREAMING_SNAKE_CASE , return_commit_hash=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) return cls.from_config(__SCREAMING_SNAKE_CASE , return_unused_kwargs=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple ) -> Tuple: self.save_config(save_directory=__SCREAMING_SNAKE_CASE , push_to_hub=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def _a ( self : Union[str, Any] ) -> Any: return self._get_compatibles() @classmethod def _a ( cls : int ) -> Tuple: __UpperCAmelCase =list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase =importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase =[ getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in compatible_classes_str if hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ] return compatible_classes
68
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __UpperCamelCase : Optional[Any] = tuple[int, int] class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = vertices lowerCAmelCase = { (min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items() } def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCAmelCase = weight def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Graph({min(self.vertices )} , {} ) lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): lowerCAmelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCAmelCase = edge lowerCAmelCase = weight subgraph.add_edge(_snake_case , _snake_case ) return subgraph def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ): lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) ) lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = {} lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read().strip().split('\n' ) lowerCAmelCase = [line.split(',' ) for line in data] for edgea in range(1 , len(_UpperCAmelCase ) ): for edgea in range(_UpperCAmelCase ): if adjaceny_matrix[edgea][edgea] != "-": lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] ) lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase ) lowerCAmelCase = graph.prims_algorithm() lowerCAmelCase = sum(graph.edges.values() ) lowerCAmelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
4
0
'''simple docstring''' import numpy class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : numpy.ndarray , a_ : numpy.ndarray ): """simple docstring""" __snake_case = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. __snake_case = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. __snake_case = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. __snake_case = numpy.random.rand(3 , 1 ) # Real output values provided. __snake_case = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. __snake_case = numpy.zeros(output_array.shape ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. __snake_case = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. __snake_case = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def A ( self : Optional[Any] ): """simple docstring""" __snake_case = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) __snake_case = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) __snake_case = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def A ( self : Union[str, Any] , a_ : numpy.ndarray , a_ : int , a_ : bool ): """simple docstring""" for iteration in range(1 , iterations + 1 ): __snake_case = self.feedforward() self.back_propagation() if give_loss: __snake_case = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f'''Iteration {iteration} Loss: {loss}''' ) def A ( self : Optional[Any] , a_ : numpy.ndarray ): """simple docstring""" __snake_case = input_arr __snake_case = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) __snake_case = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) __snake_case = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def __UpperCAmelCase ( _UpperCAmelCase : numpy.ndarray ) -> numpy.ndarray: return 1 / (1 + numpy.exp(-value )) def __UpperCAmelCase ( _UpperCAmelCase : numpy.ndarray ) -> numpy.ndarray: return (value) * (1 - (value)) def __UpperCAmelCase ( ) -> int: __snake_case = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. __snake_case = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. __snake_case = TwoHiddenLayerNeuralNetwork( input_array=_UpperCAmelCase , output_array=_UpperCAmelCase ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_UpperCAmelCase , iterations=10 , give_loss=_UpperCAmelCase ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
69
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] ) lowerCAmelCase = np.array(_UpperCAmelCase ) lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = (1, 2, 1) lowerCAmelCase = (1, 1, 0, 7) lowerCAmelCase = SARIMAX( _UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase ) lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' ) lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] ) return result[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = regressor.predict(_UpperCAmelCase ) return y_pred[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): train_user.sort() lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 ) lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 ) lowerCAmelCase = qa - qa lowerCAmelCase = qa - (iqr * 0.1) return low_lim def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ): lowerCAmelCase = 0 lowerCAmelCase = 0 for i in list_vote: if i > actual_result: lowerCAmelCase = not_safe + 1 else: if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) __UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]] __UpperCamelCase : Any = pd.DataFrame( data_input, columns=['''total_user''', '''total_even''', '''days'''] ) __UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values) # split data __UpperCamelCase : Dict = normalize_df[:, 2].tolist() __UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist() __UpperCamelCase : List[str] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) __UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist() __UpperCamelCase : Tuple = x[: len(x) - 1] __UpperCamelCase : Any = x[len(x) - 1 :] # for linear regression & sarimax __UpperCamelCase : str = total_date[: len(total_date) - 1] __UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1] __UpperCamelCase : List[Any] = total_match[: len(total_match) - 1] __UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :] __UpperCamelCase : str = total_user[len(total_user) - 1 :] __UpperCamelCase : str = total_match[len(total_match) - 1 :] # voting system with forecasting __UpperCamelCase : Any = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data __UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not ''' print('''Today\'s data is {not_str}safe.''')
4
0
from __future__ import annotations from typing import TypedDict class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = 42 def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' if not isinstance(lowercase , lowercase ): raise TypeError('The parameter s type must be str.' ) return [s[i:] + s[:i] for i in range(len(lowercase ) )] def _SCREAMING_SNAKE_CASE ( lowercase : str ): '''simple docstring''' if not isinstance(lowercase , lowercase ): raise TypeError('The parameter s type must be str.' ) if not s: raise ValueError('The parameter s must not be empty.' ) lowerCamelCase_ = all_rotations(lowercase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation lowerCamelCase_ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(lowercase ), } return response def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : int ): '''simple docstring''' if not isinstance(lowercase , lowercase ): raise TypeError('The parameter bwt_string type must be str.' ) if not bwt_string: raise ValueError('The parameter bwt_string must not be empty.' ) try: lowerCamelCase_ = int(lowercase ) except ValueError: raise TypeError( 'The parameter idx_original_string type must be int or passive' ' of cast to int.' ) if idx_original_string < 0: raise ValueError('The parameter idx_original_string must not be lower than 0.' ) if idx_original_string >= len(lowercase ): raise ValueError( 'The parameter idx_original_string must be lower than' ' len(bwt_string).' ) lowerCamelCase_ = [''] * len(lowercase ) for _ in range(len(lowercase ) ): for i in range(len(lowercase ) ): lowerCamelCase_ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase : Any = "Provide a string that I will generate its BWT transform: " lowerCamelCase : Tuple = input(entry_msg).strip() lowerCamelCase : Dict = bwt_transform(s) print( F"""Burrows Wheeler transform for string '{s}' results """ F"""in '{result['bwt_string']}'""" ) lowerCamelCase : Dict = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """ F"""we get original string '{original_string}'""" )
70
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , ) lowerCAmelCase = parser.parse_args() return args def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): if not len(_UpperCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) lowerCAmelCase ,lowerCAmelCase = imgs[0].size lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) ) lowerCAmelCase ,lowerCAmelCase = grid.size for i, img in enumerate(_UpperCAmelCase ): grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ): lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase ) lowerCAmelCase = pipeline( _UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCamelCase : Optional[Any] = parse_args() # Load models and create wrapper for stable diffusion __UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id)) __UpperCamelCase : Optional[Any] = pipeline.to(unet.device) __UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys _lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
71
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ): lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCamelCase : Optional[Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCamelCase : int = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ): try: lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(_UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ): lowerCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(_UpperCAmelCase , _UpperCAmelCase ): AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval() else: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}' lowerCAmelCase = teacher.config.to_diff_dict() try: lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_UpperCAmelCase ) # Copy weights lowerCAmelCase = teacher.config_class(**_UpperCAmelCase ) lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(_UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) if d_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) try: if hasattr( _UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) lowerCAmelCase = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(_UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
4
0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging _UpperCAmelCase : List[str] = logging.get_logger(__name__) _UpperCAmelCase : str = { '''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class __magic_name__ ( __SCREAMING_SNAKE_CASE ): UpperCamelCase__ = 'umt5' UpperCamelCase__ = ['past_key_values'] def __init__( self , snake_case_=25_01_12 , snake_case_=5_12 , snake_case_=64 , snake_case_=10_24 , snake_case_=8 , snake_case_=None , snake_case_=6 , snake_case_=32 , snake_case_=1_28 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=1.0 , snake_case_="gated-gelu" , snake_case_=True , snake_case_=True , snake_case_="T5Tokenizer" , snake_case_=True , snake_case_=0 , snake_case_=1 , snake_case_=0 , **snake_case_ , ): super().__init__( is_encoder_decoder=snake_case_ , tokenizer_class=snake_case_ , tie_word_embeddings=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , **snake_case_ , ) lowercase =vocab_size lowercase =d_model lowercase =d_kv lowercase =d_ff lowercase =num_layers lowercase =( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase =num_heads lowercase =relative_attention_num_buckets lowercase =relative_attention_max_distance lowercase =dropout_rate lowercase =layer_norm_epsilon lowercase =initializer_factor lowercase =feed_forward_proj lowercase =use_cache lowercase =self.feed_forward_proj.split('''-''' ) lowercase =act_info[-1] lowercase =act_info[0] == '''gated''' if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2: raise ValueError( f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) if feed_forward_proj == "gated-gelu": lowercase ='''gelu_new''' @property def _A( self ): return self.d_model @property def _A( self ): return self.num_heads @property def _A( self ): return self.num_layers class __magic_name__ ( __SCREAMING_SNAKE_CASE ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def _A( self ): lowercase ={ '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: lowercase ='''past_encoder_sequence + sequence''' lowercase ={0: '''batch'''} lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowercase ={0: '''batch''', 1: '''decoder_sequence'''} lowercase ={0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def _A( self ): return 13 @property def _A( self ): return 5E-4
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( A__ ): _lowercase : Dict = ['''image_processor''', '''tokenizer'''] _lowercase : Optional[Any] = '''AutoImageProcessor''' _lowercase : List[Any] = '''AutoTokenizer''' def __init__( self , a , a) -> Any: super().__init__(a , a) SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , a=None , a=None , a=None , **a) -> Optional[int]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: SCREAMING_SNAKE_CASE = self.tokenizer(a , return_tensors=a , **a) if images is not None: SCREAMING_SNAKE_CASE = self.image_processor(a , return_tensors=a , **a) if text is not None and images is not None: SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a) , tensor_type=a) def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> Tuple: return self.tokenizer.batch_decode(*a , **a) def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> List[Any]: return self.tokenizer.decode(*a , **a) @property def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: return ["input_ids", "attention_mask", "pixel_values"]
73
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: if resistor <= 0: lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!' raise ValueError(_UpperCAmelCase ) first_sum += 1 / float(_UpperCAmelCase ) index += 1 return 1 / first_sum def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCAmelCase = F'Resistor at index {index} has a negative value!' raise ValueError(_UpperCAmelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
4
0
def a__ ( snake_case , snake_case ): """simple docstring""" if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __SCREAMING_SNAKE_CASE : Dict = str(bin(snake_case ) )[2:] # remove the leading "0b" __SCREAMING_SNAKE_CASE : Optional[Any] = str(bin(snake_case ) )[2:] # remove the leading "0b" __SCREAMING_SNAKE_CASE : str = max(len(snake_case ) , len(snake_case ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(snake_case ) , b_binary.zfill(snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
74
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class a ( a__ ): snake_case__ = '''glpn''' def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = num_channels lowerCAmelCase = num_encoder_blocks lowerCAmelCase = depths lowerCAmelCase = sr_ratios lowerCAmelCase = hidden_sizes lowerCAmelCase = patch_sizes lowerCAmelCase = strides lowerCAmelCase = mlp_ratios lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = drop_path_rate lowerCAmelCase = layer_norm_eps lowerCAmelCase = decoder_hidden_size lowerCAmelCase = max_depth lowerCAmelCase = head_in_index
4
0
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['pixel_values'] def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : List[Any] = size UpperCAmelCase__ : int = resample UpperCAmelCase__ : int = do_center_crop UpperCAmelCase__ : List[str] = crop_size UpperCAmelCase__ : Union[str, Any] = do_rescale UpperCAmelCase__ : Optional[int] = rescale_factor UpperCAmelCase__ : List[Any] = do_normalize UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] ) UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( _A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ): '''simple docstring''' UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Tuple = size if size is not None else self.size UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A ) UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' ) UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images] if do_resize: UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images] if do_center_crop: UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images] if do_rescale: UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images] if do_normalize: UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images] UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] UpperCAmelCase__ : Dict = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
75
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = range_bbox def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase = bbox[i, j, 3] lowerCAmelCase = bbox[i, j, 1] lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase = bbox[i, j, 2] lowerCAmelCase = bbox[i, j, 0] lowerCAmelCase = t lowerCAmelCase = tf.convert_to_tensor(_snake_case ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = True snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def _SCREAMING_SNAKE_CASE (): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCAmelCase = outputs.loss lowerCAmelCase = (2,) self.assertEqual(loss.shape , _snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = (2, 2) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _snake_case ) self.assertEqual(outputs.end_logits.shape , _snake_case )
4
0
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[Any] = 3_84 __lowercase : List[Any] = 7 if "tiny" in model_name: __lowercase : List[str] = 96 __lowercase : Dict = (2, 2, 6, 2) __lowercase : Tuple = (3, 6, 12, 24) elif "small" in model_name: __lowercase : Optional[Any] = 96 __lowercase : int = (2, 2, 18, 2) __lowercase : List[str] = (3, 6, 12, 24) elif "base" in model_name: __lowercase : List[Any] = 1_28 __lowercase : List[str] = (2, 2, 18, 2) __lowercase : Tuple = (4, 8, 16, 32) __lowercase : Union[str, Any] = 12 __lowercase : Union[str, Any] = 5_12 elif "large" in model_name: __lowercase : List[Any] = 1_92 __lowercase : Union[str, Any] = (2, 2, 18, 2) __lowercase : Optional[int] = (6, 12, 24, 48) __lowercase : Union[str, Any] = 12 __lowercase : Optional[Any] = 7_68 # set label information __lowercase : Any = 1_50 __lowercase : List[str] = '''huggingface/label-files''' __lowercase : int = '''ade20k-id2label.json''' __lowercase : str = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) __lowercase : List[str] = {int(__UpperCamelCase ): v for k, v in idalabel.items()} __lowercase : Any = {v: k for k, v in idalabel.items()} __lowercase : Any = SwinConfig( embed_dim=__UpperCamelCase , depths=__UpperCamelCase , num_heads=__UpperCamelCase , window_size=__UpperCamelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) __lowercase : List[str] = UperNetConfig( backbone_config=__UpperCamelCase , auxiliary_in_channels=__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase , ) return config def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = dct.pop(__UpperCamelCase ) __lowercase : Optional[Any] = val def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowercase : List[str] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowercase : Optional[Any] = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) __lowercase : int = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowercase : Optional[Any] = in_proj_weight[:dim, :] __lowercase : Union[str, Any] = in_proj_bias[: dim] __lowercase : Any = in_proj_weight[ dim : dim * 2, : ] __lowercase : Dict = in_proj_bias[ dim : dim * 2 ] __lowercase : List[Any] = in_proj_weight[ -dim :, : ] __lowercase : Optional[Any] = in_proj_bias[-dim :] # fmt: on def __UpperCAmelCase ( __UpperCamelCase ): __lowercase ,__lowercase : List[Any] = x.shape __lowercase : Tuple = x.reshape(__UpperCamelCase , 4 , in_channel // 4 ) __lowercase : Union[str, Any] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__UpperCamelCase , __UpperCamelCase ) return x def __UpperCAmelCase ( __UpperCamelCase ): __lowercase ,__lowercase : List[Any] = x.shape __lowercase : Tuple = x.reshape(__UpperCamelCase , in_channel // 4 , 4 ) __lowercase : int = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__UpperCamelCase , __UpperCamelCase ) return x def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[int] = x.shape[0] __lowercase : Dict = x.reshape(4 , in_channel // 4 ) __lowercase : int = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__UpperCamelCase ) return x def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = x.shape[0] __lowercase : Any = x.reshape(in_channel // 4 , 4 ) __lowercase : Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__UpperCamelCase ) return x def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : str = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } __lowercase : str = model_name_to_url[model_name] __lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' , file_name=__UpperCamelCase )[ '''state_dict''' ] for name, param in state_dict.items(): print(__UpperCamelCase , param.shape ) __lowercase : Optional[int] = get_upernet_config(__UpperCamelCase ) __lowercase : Any = UperNetForSemanticSegmentation(__UpperCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __lowercase : Dict = state_dict.pop(__UpperCamelCase ) if "bn" in key: __lowercase : Optional[Any] = key.replace('''bn''' , '''batch_norm''' ) __lowercase : Union[str, Any] = val # rename keys __lowercase : List[Any] = create_rename_keys(__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) read_in_q_k_v(__UpperCamelCase , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: __lowercase : List[Any] = reverse_correct_unfold_reduction_order(__UpperCamelCase ) if "norm" in key: __lowercase : Optional[int] = reverse_correct_unfold_norm_order(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # verify on image __lowercase : Optional[Any] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' __lowercase : Union[str, Any] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' ) __lowercase : str = SegformerImageProcessor() __lowercase : List[str] = processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values with torch.no_grad(): __lowercase : Optional[int] = model(__UpperCamelCase ) __lowercase : Optional[int] = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": __lowercase : Any = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": __lowercase : List[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": __lowercase : int = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": __lowercase : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCamelCase ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__UpperCamelCase ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-swin-tiny', type=str, choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']], help='Name of the Swin + UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
76
"""simple docstring""" import argparse import os import re import packaging.version __UpperCamelCase : Union[str, Any] = '''examples/''' __UpperCamelCase : str = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __UpperCamelCase : List[str] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __UpperCamelCase : Optional[int] = '''README.md''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.read() lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern] lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase ) lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '🤗 Transformers currently provides the following architectures' lowerCAmelCase = '1. Want to contribute a new model?' with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.readlines() # Find the start of the list. lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowerCAmelCase = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): with open(REPLACE_FILES['init'] , 'r' ) as f: lowerCAmelCase = f.read() lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ): lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: lowerCAmelCase = default_version.base_version elif patch: lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = default_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = get_version() lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0' lowerCAmelCase = current_version.base_version # Check with the user we got that right. lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = dev_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __UpperCamelCase : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
4
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __UpperCamelCase : Optional[int] = pytest.mark.integration @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} ) return dset def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() lowerCAmelCase = dset.map( lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case ) lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch lowerCAmelCase = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} lowerCAmelCase = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_snake_case ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1] lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) self.assertRaises(_snake_case , index.search_batch , queries[0] ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCAmelCase = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_snake_case ): lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = faiss.IndexFlat(5 ) lowerCAmelCase = FaissIndex(custom_index=_snake_case ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: index.save(tmp_file.name ) lowerCAmelCase = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowerCAmelCase = 'index.faiss' lowerCAmelCase = F'mock://{index_name}' index.save(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = Elasticsearch() lowerCAmelCase = {'acknowledged': True} lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case ) # batched queries with timeout lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case )
4
0
'''simple docstring''' import math def lowerCAmelCase_ ( snake_case_ : int ) -> bool: '''simple docstring''' assert isinstance(snake_case_ , snake_case_ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False UpperCAmelCase_ = range(3 , int(math.sqrt(snake_case_ ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[int]=1 , **snake_case_ : Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = factor * value UpperCAmelCase_ = value while not is_prime(snake_case_ ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **snake_case_ ) return value
78
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a ( a__ , a__ , unittest.TestCase ): snake_case__ = IFInpaintingPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_dummy_components() def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCamelCase__ ( self ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
4
0
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : int = {"""vocab_file""": """vocab.json"""} SCREAMING_SNAKE_CASE__ : Tuple = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } SCREAMING_SNAKE_CASE__ : List[Any] = {"""mgp-str""": 27} class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _lowerCAmelCase , _lowerCAmelCase="[GO]" , _lowerCAmelCase="[GO]" , _lowerCAmelCase="[s]" , _lowerCAmelCase="[GO]" , **_lowerCAmelCase ): super().__init__( unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , **_lowerCAmelCase , ) with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase__ : Optional[int] = json.load(_lowerCAmelCase ) UpperCAmelCase__ : Any = {v: k for k, v in self.vocab.items()} @property def __UpperCAmelCase ( self ): return len(self.vocab ) def __UpperCAmelCase ( self ): return dict(self.vocab , **self.added_tokens_encoder ) def __UpperCAmelCase ( self , _lowerCAmelCase ): UpperCAmelCase__ : Tuple = [] for s in text: char_tokens.extend(_lowerCAmelCase ) return char_tokens def __UpperCAmelCase ( self , _lowerCAmelCase ): return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) ) def __UpperCAmelCase ( self , _lowerCAmelCase ): return self.decoder.get(_lowerCAmelCase ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error("""Vocabulary path ({}) should be a directory""".format(_lowerCAmelCase ) ) return UpperCAmelCase__ : Optional[Any] = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" ) return (vocab_file,)
79
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = self.vocab_size - 1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_snake_case ) return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @require_torch class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(_snake_case ) lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=_snake_case ) # the president is lowerCAmelCase = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case ) self.assertListEqual(output_ids[0].tolist() , _snake_case )
4
0
class __UpperCamelCase : def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = name __lowercase = val def __str__( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return F'{self.__class__.__name__}({self.name}, {self.val})' def __lt__( self : Optional[int] , _lowerCAmelCase : int ) -> Any: """simple docstring""" return self.val < other.val class __UpperCamelCase : def __init__( self : Union[str, Any] , _lowerCAmelCase : Dict ) -> List[Any]: """simple docstring""" __lowercase = {} __lowercase = {} __lowercase = self.build_heap(_lowerCAmelCase ) def __getitem__( self : List[Any] , _lowerCAmelCase : int ) -> Union[str, Any]: """simple docstring""" return self.get_value(_lowerCAmelCase ) def _a ( self : str , _lowerCAmelCase : Tuple ) -> Union[str, Any]: """simple docstring""" return (idx - 1) // 2 def _a ( self : Optional[Any] , _lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" return idx * 2 + 1 def _a ( self : List[Any] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" return idx * 2 + 2 def _a ( self : int , _lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" return self.heap_dict[key] def _a ( self : Optional[Any] , _lowerCAmelCase : Any ) -> Any: """simple docstring""" __lowercase = len(_lowerCAmelCase ) - 1 __lowercase = self.get_parent_idx(_lowerCAmelCase ) for idx, i in enumerate(_lowerCAmelCase ): __lowercase = idx __lowercase = i.val for i in range(_lowerCAmelCase , -1 , -1 ): self.sift_down(_lowerCAmelCase , _lowerCAmelCase ) return array def _a ( self : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ) -> List[str]: """simple docstring""" while True: __lowercase = self.get_left_child_idx(_lowerCAmelCase ) # noqa: E741 __lowercase = self.get_right_child_idx(_lowerCAmelCase ) __lowercase = idx if l < len(_lowerCAmelCase ) and array[l] < array[idx]: __lowercase = l if r < len(_lowerCAmelCase ) and array[r] < array[smallest]: __lowercase = r if smallest != idx: __lowercase , __lowercase = array[smallest], array[idx] ( ( __lowercase ) , ( __lowercase ) , ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) __lowercase = smallest else: break def _a ( self : int , _lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" __lowercase = self.get_parent_idx(_lowerCAmelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: __lowercase , __lowercase = self.heap[idx], self.heap[p] __lowercase , __lowercase = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) __lowercase = p __lowercase = self.get_parent_idx(_lowerCAmelCase ) def _a ( self : Optional[int] ) -> int: """simple docstring""" return self.heap[0] def _a ( self : str ) -> str: """simple docstring""" __lowercase , __lowercase = self.heap[-1], self.heap[0] __lowercase , __lowercase = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) __lowercase = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def _a ( self : Union[str, Any] , _lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" self.heap.append(_lowerCAmelCase ) __lowercase = len(self.heap ) - 1 __lowercase = node.val self.sift_up(len(self.heap ) - 1 ) def _a ( self : List[Any] ) -> int: """simple docstring""" return len(self.heap ) == 0 def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" __lowercase = new_value __lowercase = new_value self.sift_up(self.idx_of_element[node] ) __UpperCamelCase : Tuple = Node("""R""", -1) __UpperCamelCase : Union[str, Any] = Node("""B""", 6) __UpperCamelCase : Optional[Any] = Node("""A""", 3) __UpperCamelCase : Union[str, Any] = Node("""X""", 1) __UpperCamelCase : Any = Node("""E""", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __UpperCamelCase : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("""Min Heap - before decrease key""") for i in my_min_heap.heap: print(i) print("""Min Heap - After decrease key of node [B -> -17]""") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
80
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __UpperCamelCase : str = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' ) lowerCAmelCase = parser.parse_args() logger.info(F'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(F'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: lowerCAmelCase = fp.readlines() logger.info('Start encoding' ) logger.info(F'{len(_UpperCAmelCase )} examples to process.' ) lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 1_0000 lowerCAmelCase = time.time() for text in data: lowerCAmelCase = F'{bos} {text.strip()} {sep}' lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) rslt.append(_UpperCAmelCase ) iter += 1 if iter % interval == 0: lowerCAmelCase = time.time() logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) lowerCAmelCase = time.time() logger.info('Finished binarization' ) logger.info(F'{len(_UpperCAmelCase )} examples processed.' ) lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle' lowerCAmelCase = tokenizer.vocab_size if vocab_size < (1 << 16): lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt] else: lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'Dump to {dp_file}' ) with open(_UpperCAmelCase , 'wb' ) as handle: pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
4
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _snake_case : Optional[Any] = {"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[Any] = ["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys _snake_case : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
81
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''bert''' def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = classifier_dropout class a ( a__ ): @property def UpperCamelCase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
4
0
"""simple docstring""" from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake lowerCamelCase = numpy.array([0, 0]) lowerCamelCase = numpy.array([0.5, 0.8_660_254]) lowerCamelCase = numpy.array([1, 0]) lowerCamelCase = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = initial_vectors for _ in range(lowerCAmelCase__ ): UpperCAmelCase_ = iteration_step(lowerCAmelCase__ ) return vectors def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = [] for i, start_vector in enumerate(vectors[:-1] ): UpperCAmelCase_ = vectors[i + 1] new_vectors.append(lowerCAmelCase__ ) UpperCAmelCase_ = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = numpy.radians(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ ) UpperCAmelCase_ = numpy.array(((c, -s), (s, c)) ) return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = plt.gca() axes.set_aspect("equal" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() UpperCAmelCase_ , UpperCAmelCase_ = zip(*lowerCAmelCase__ ) plt.plot(lowerCAmelCase__ , lowerCAmelCase__ ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
82
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( a__ , unittest.TestCase ): snake_case__ = DanceDiffusionPipeline snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) lowerCAmelCase = IPNDMScheduler() lowerCAmelCase = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = DanceDiffusionPipeline(**_snake_case ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe(**_snake_case ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_local() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_optional_components() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_attention_slicing_forward_pass() def UpperCamelCase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
4
0
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class __snake_case ( _lowercase): snake_case__ : Dict = ["audio_values", "audio_mask"] def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=2_0_4_8 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=[1_6, 1_6] , __lowerCAmelCase : Optional[Any]=1_2_8 , __lowerCAmelCase : Optional[int]=4_4_1_0_0 , __lowerCAmelCase : Optional[Any]=8_6 , __lowerCAmelCase : Dict=2_0_4_8 , __lowerCAmelCase : Tuple=0.0 , **__lowerCAmelCase : Dict , ): """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : str = spectrogram_length _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = patch_size _lowerCamelCase : Optional[Any] = feature_size // self.patch_size[1] _lowerCamelCase : Any = n_fft _lowerCamelCase : int = sampling_rate // hop_length_to_sampling_rate _lowerCamelCase : Optional[int] = sampling_rate _lowerCamelCase : Optional[Any] = padding_value _lowerCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : np.array ): """simple docstring""" _lowerCamelCase : Tuple = spectrogram( __lowerCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) _lowerCamelCase : Union[str, Any] = log_spec[:, :-1] _lowerCamelCase : Optional[Any] = log_spec - 20.0 _lowerCamelCase : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Optional[Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : List[str] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) _lowerCamelCase : Union[str, Any] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _lowerCamelCase : List[str] = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCamelCase : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): _lowerCamelCase : Any = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowerCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _lowerCamelCase : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): _lowerCamelCase : Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _lowerCamelCase : List[Any] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _lowerCamelCase : Any = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _lowerCamelCase : str = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding _lowerCamelCase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _lowerCamelCase : Optional[int] = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _lowerCamelCase : int = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): _lowerCamelCase : List[str] = audio_features[i] _lowerCamelCase : Optional[Any] = feature # return as BatchFeature if return_attention_mask: _lowerCamelCase : Union[str, Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: _lowerCamelCase : Any = {'''audio_values''': padded_audio_features} _lowerCamelCase : Optional[int] = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
83
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , ) lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenLlamaModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'single_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'multi_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowerCAmelCase = original_model(_snake_case ).last_hidden_state lowerCAmelCase = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
4
0
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class A_ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , snake_case=1000 , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope lowercase = range_bbox def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowercase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase = bbox[i, j, 3] lowercase = bbox[i, j, 1] lowercase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase = bbox[i, j, 2] lowercase = bbox[i, j, 0] lowercase = t lowercase = tf.convert_to_tensor(snake_case ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None if self.use_token_type_ids: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase = None lowercase = None lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = TFLayoutLMModel(config=snake_case ) lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) lowercase = model(snake_case , snake_case , token_type_ids=snake_case ) lowercase = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = TFLayoutLMForMaskedLM(config=snake_case ) lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = TFLayoutLMForSequenceClassification(config=snake_case ) lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = TFLayoutLMForTokenClassification(config=snake_case ) lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = TFLayoutLMForQuestionAnswering(config=snake_case ) lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) = config_and_inputs lowercase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Optional[Any] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCamelCase : Tuple = ( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : List[str] = False _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : Optional[int] = 10 def SCREAMING_SNAKE_CASE__ ( self ): lowercase = TFLayoutLMModelTester(self ) lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def UpperCAmelCase_ ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowercase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowercase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowercase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowercase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowercase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class A_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self ): lowercase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs() # forward pass lowercase = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] lowercase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowercase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1E-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized sequence classification head lowercase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs() # forward pass lowercase = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowercase = outputs.loss lowercase = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits lowercase = outputs.logits lowercase = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head lowercase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs() # forward pass lowercase = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits lowercase = outputs.logits lowercase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head lowercase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs() # forward pass lowercase = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits lowercase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
84
"""simple docstring""" from typing import Any class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = data lowerCAmelCase = None def __repr__( self ): """simple docstring""" return F'Node({self.data})' class a : def __init__( self ): """simple docstring""" lowerCAmelCase = None def __iter__( self ): """simple docstring""" lowerCAmelCase = self.head while node: yield node.data lowerCAmelCase = node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(_snake_case ) for item in self] ) def __getitem__( self , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) lowerCAmelCase = self.head for _ in range(_snake_case ): lowerCAmelCase = current.next lowerCAmelCase = data def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(len(self ) , _snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(0 , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) lowerCAmelCase = Node(_snake_case ) if self.head is None: lowerCAmelCase = new_node elif index == 0: lowerCAmelCase = self.head # link new_node to head lowerCAmelCase = new_node else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = new_node def UpperCamelCase__ ( self ): # print every node data """simple docstring""" print(self ) def UpperCamelCase__ ( self ): """simple docstring""" return self.delete_nth(0 ) def UpperCamelCase__ ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def UpperCamelCase__ ( self , _snake_case = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) lowerCAmelCase = self.head # default first node if index == 0: lowerCAmelCase = self.head.next else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = temp.next.next return delete_node.data def UpperCamelCase__ ( self ): """simple docstring""" return self.head is None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = None lowerCAmelCase = self.head while current: # Store the current node's next node. lowerCAmelCase = current.next # Make the current node's next point backwards lowerCAmelCase = prev # Make the previous node be the current node lowerCAmelCase = current # Make the current node the next node (to progress iteration) lowerCAmelCase = next_node # Return prev in order to put the head at the end lowerCAmelCase = prev def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = LinkedList() assert linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_UpperCAmelCase ) == i linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_UpperCAmelCase ) == 9 assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [ -9, 100, Node(7734_5112 ), 'dlrow olleH', 7, 5555, 0, -192.5_5555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] lowerCAmelCase = LinkedList() for i in test_input: linked_list.insert_tail(_UpperCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase = linked_list.delete_head() assert result == -9 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' ) ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_UpperCAmelCase ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_UpperCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _SCREAMING_SNAKE_CASE (): from doctest import testmod testmod() lowerCAmelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_UpperCAmelCase ) print('\nReading/changing Node data using indexing:' ) print(F'Element at Position 1: {linked_list[1]}' ) lowerCAmelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_UpperCAmelCase ) print(F'length of linked_list is : {len(_UpperCAmelCase )}' ) if __name__ == "__main__": main()
4
0
def _a ( lowercase__ : int = 1_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : str = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" from __future__ import annotations import requests def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(_UpperCAmelCase ).json() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories] return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase ) return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
4
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ , A_ = image.size A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image[None].transpose(0 ,3 ,1 ,2 ) A_ = torch.from_numpy(__UpperCamelCase ) return 2.0 * image - 1.0 class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = 1 elif isinstance(UpperCAmelCase , torch.Tensor ): A_ = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' ) if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = preprocess(UpperCAmelCase ) A_ , A_ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image A_ = (batch_size, self.unet.config.in_channels // 2, height, width) A_ = next(self.unet.parameters() ).dtype A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) A_ = image.to(device=self.device , dtype=UpperCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCAmelCase , device=self.device ) A_ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta for t in self.progress_bar(UpperCAmelCase ): # concat latents and low resolution image in the channel dimension. A_ = torch.cat([latents, image] , dim=1 ) A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # decode the image latents with the VQVAE A_ = self.vqvae.decode(UpperCAmelCase ).sample A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 ) A_ = image / 2 + 0.5 A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
86
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 48 lowerCAmelCase = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 60 lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = 126 lowerCAmelCase = 7 lowerCAmelCase = 255.0 lowerCAmelCase = '' return config def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ): if "patch_embed.proj" in name and "layers" not in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: lowerCAmelCase = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCAmelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCAmelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCAmelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: lowerCAmelCase = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: lowerCAmelCase = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: lowerCAmelCase = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": lowerCAmelCase = 'layernorm.weight' if name == "norm.bias": lowerCAmelCase = 'layernorm.bias' if "conv_first" in name: lowerCAmelCase = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCAmelCase = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' ) lowerCAmelCase = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: lowerCAmelCase = 'swin2sr.' + name return name def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ): for key in orig_state_dict.copy().keys(): lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: lowerCAmelCase = key.split('.' ) lowerCAmelCase = int(key_split[1] ) lowerCAmelCase = int(key_split[4] ) lowerCAmelCase = config.embed_dim if "weight" in key: lowerCAmelCase = val[:dim, :] lowerCAmelCase = val[dim : dim * 2, :] lowerCAmelCase = val[-dim:, :] else: lowerCAmelCase = val[:dim] lowerCAmelCase = val[dim : dim * 2] lowerCAmelCase = val[-dim:] pass else: lowerCAmelCase = val return orig_state_dict def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ): lowerCAmelCase = get_config(_UpperCAmelCase ) lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase ) model.eval() lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' ) lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) lowerCAmelCase = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256 lowerCAmelCase = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCAmelCase = model(_UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 ) print('Looks ok!' ) lowerCAmelCase = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } lowerCAmelCase = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') __UpperCamelCase : Optional[int] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''naver-clova-ix/donut-base-finetuned-docvqa''' UpperCAmelCase__ = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) UpperCAmelCase__ = '''document_qa''' UpperCAmelCase__ = AutoProcessor UpperCAmelCase__ = VisionEncoderDecoderModel UpperCAmelCase__ = ['''image''', '''text'''] UpperCAmelCase__ = ['''text'''] def __init__( self : int , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Dict) ->List[str]: '''simple docstring''' if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''') super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : "Image" , UpperCAmelCase__ : str) ->Optional[int]: '''simple docstring''' A__ = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' A__ = task_prompt.replace('''{user_input}''' , UpperCAmelCase__) A__ = self.pre_processor.tokenizer( UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors='''pt''').input_ids A__ = self.pre_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[str]) ->Any: '''simple docstring''' return self.model.generate( inputs['''pixel_values'''].to(self.device) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCAmelCase__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCAmelCase__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCAmelCase__ , ).sequences def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int]) ->Dict: '''simple docstring''' A__ = self.pre_processor.batch_decode(UpperCAmelCase__)[0] A__ = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''') A__ = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''') A__ = re.sub(R'''<.*?>''' , '''''' , UpperCAmelCase__ , count=1).strip() # remove first task start token A__ = self.pre_processor.tokenajson(UpperCAmelCase__) return sequence["answer"]
87
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''megatron-bert''' def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache
4
0
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCAmelCase = """\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ UpperCAmelCase = """\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ UpperCAmelCase = """ Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def _snake_case ( __snake_case : List[Any] , __snake_case : str ): """simple docstring""" return float((preds == labels).mean() ) def _snake_case ( __snake_case : Optional[int] , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = simple_accuracy(__snake_case , __snake_case ) _lowerCamelCase : Union[str, Any] = float(fa_score(y_true=__snake_case , y_pred=__snake_case ) ) return { "accuracy": acc, "f1": fa, } def _snake_case ( __snake_case : Optional[Any] , __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : Dict = np.array(__snake_case ) _lowerCamelCase : Optional[Any] = np.array(__snake_case ) _lowerCamelCase : List[Any] = en_sentvecs.shape[0] # mean centering _lowerCamelCase : str = en_sentvecs - np.mean(__snake_case , axis=0 ) _lowerCamelCase : Optional[int] = in_sentvecs - np.mean(__snake_case , axis=0 ) _lowerCamelCase : Union[str, Any] = cdist(__snake_case , __snake_case , """cosine""" ) _lowerCamelCase : str = np.array(range(__snake_case ) ) _lowerCamelCase : Optional[Any] = sim.argsort(axis=1 )[:, :10] _lowerCamelCase : Dict = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> Tuple: if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """ """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """ """\"wiki-ner\"]""") return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""") if self.config_name != """cvit-mkb-clsr""" else datasets.Sequence(datasets.Value("""float32""")), """references""": datasets.Value("""int64""") if self.config_name != """cvit-mkb-clsr""" else datasets.Sequence(datasets.Value("""float32""")), }) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any: if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)} elif self.config_name in ["wiki-ner"]: return acc_and_fa(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)} else: raise KeyError( """You should supply a configuration name selected in """ """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """ """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """ """\"wiki-ner\"]""")
88
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
4
0
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> float: if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate _lowercase : Any = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly _lowercase : Any = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
89
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a ( a__ ): snake_case__ = 42 class a ( a__ , a__ ): @register_to_config def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder lowerCAmelCase = Encoder( in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , ) lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case ) lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) # pass init params to Decoder lowerCAmelCase = Decoder( in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = self.encoder(_snake_case ) lowerCAmelCase = self.quant_conv(_snake_case ) if not return_dict: return (h,) return VQEncoderOutput(latents=_snake_case ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ): """simple docstring""" if not force_not_quantize: lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case ) else: lowerCAmelCase = h lowerCAmelCase = self.post_quant_conv(_snake_case ) lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = sample lowerCAmelCase = self.encode(_snake_case ).latents lowerCAmelCase = self.decode(_snake_case ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case )
4
0
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = list[list[int]] # assigning initial values to the grid __UpperCAmelCase = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __UpperCAmelCase = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _snake_case ( A , A , A , A ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _snake_case ( A ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _snake_case ( A ) -> Matrix | None: if location := find_empty_location(A ): lowerCAmelCase__ , lowerCAmelCase__ = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(A , A , A , A ): lowerCAmelCase__ = digit if sudoku(A ) is not None: return grid lowerCAmelCase__ = 0 return None def _snake_case ( A ) -> None: for row in grid: for cell in row: print(A , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') __UpperCAmelCase = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
90
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __UpperCamelCase : Optional[Any] = tuple[int, int] class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = vertices lowerCAmelCase = { (min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items() } def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCAmelCase = weight def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Graph({min(self.vertices )} , {} ) lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): lowerCAmelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCAmelCase = edge lowerCAmelCase = weight subgraph.add_edge(_snake_case , _snake_case ) return subgraph def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ): lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) ) lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = {} lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read().strip().split('\n' ) lowerCAmelCase = [line.split(',' ) for line in data] for edgea in range(1 , len(_UpperCAmelCase ) ): for edgea in range(_UpperCAmelCase ): if adjaceny_matrix[edgea][edgea] != "-": lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] ) lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase ) lowerCAmelCase = graph.prims_algorithm() lowerCAmelCase = sum(graph.edges.values() ) lowerCAmelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
4
0
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : list[int] ): if not nums: return 0 A = nums[0] A = 0 for num in nums[1:]: A , A = ( max_excluding + num, max(snake_case__ , snake_case__ ), ) return max(snake_case__ , snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] ) lowerCAmelCase = np.array(_UpperCAmelCase ) lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = (1, 2, 1) lowerCAmelCase = (1, 1, 0, 7) lowerCAmelCase = SARIMAX( _UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase ) lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' ) lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] ) return result[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = regressor.predict(_UpperCAmelCase ) return y_pred[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): train_user.sort() lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 ) lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 ) lowerCAmelCase = qa - qa lowerCAmelCase = qa - (iqr * 0.1) return low_lim def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ): lowerCAmelCase = 0 lowerCAmelCase = 0 for i in list_vote: if i > actual_result: lowerCAmelCase = not_safe + 1 else: if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) __UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]] __UpperCamelCase : Any = pd.DataFrame( data_input, columns=['''total_user''', '''total_even''', '''days'''] ) __UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values) # split data __UpperCamelCase : Dict = normalize_df[:, 2].tolist() __UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist() __UpperCamelCase : List[str] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) __UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist() __UpperCamelCase : Tuple = x[: len(x) - 1] __UpperCamelCase : Any = x[len(x) - 1 :] # for linear regression & sarimax __UpperCamelCase : str = total_date[: len(total_date) - 1] __UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1] __UpperCamelCase : List[Any] = total_match[: len(total_match) - 1] __UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :] __UpperCamelCase : str = total_user[len(total_user) - 1 :] __UpperCamelCase : str = total_match[len(total_match) - 1 :] # voting system with forecasting __UpperCamelCase : Any = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data __UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not ''' print('''Today\'s data is {not_str}safe.''')
4
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Union[str, Any]=None ): '''simple docstring''' lowercase : Optional[Any] =np.random.default_rng(UpperCAmelCase__ ) lowercase : Union[str, Any] =length lowercase : List[Any] =rng.normal(size=(length,) ).astype(np.floataa ) lowercase : Dict =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Tuple ): '''simple docstring''' return self.length def __getitem__( self : Union[str, Any] , UpperCAmelCase__ : Any ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : str=False ): '''simple docstring''' super().__init__() lowercase : Any =torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowercase : List[str] =torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowercase : List[Any] =True def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Any=None ): '''simple docstring''' if self.first_batch: print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) lowercase : Any =False return x * self.a[0] + self.b[0] class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : Optional[Any]=False ): '''simple docstring''' super().__init__() lowercase : Optional[int] =torch.nn.Parameter(torch.tensor(UpperCAmelCase__ ).float() ) lowercase : int =torch.nn.Parameter(torch.tensor(UpperCAmelCase__ ).float() ) lowercase : Tuple =True def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) lowercase : List[Any] =False return x * self.a + self.b def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : int = 16 ) -> Union[str, Any]: from datasets import load_dataset from transformers import AutoTokenizer lowercase : Dict =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase : Optional[int] ={'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowercase : Dict =load_dataset('''csv''' , data_files=__magic_name__ ) lowercase : int =datasets['''train'''].unique('''label''' ) lowercase : List[str] ={v: i for i, v in enumerate(__magic_name__ )} def tokenize_function(__magic_name__ : Dict ): # max_length=None => use the model max length (it's actually the default) lowercase : Dict =tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ , padding='''max_length''' ) if "label" in examples: lowercase : List[Any] =[label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase : Optional[int] =datasets.map( __magic_name__ , batched=__magic_name__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__magic_name__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__magic_name__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(__magic_name__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowercase : Union[str, Any] =DataLoader(tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=2 ) lowercase : Tuple =DataLoader(tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=1 ) return train_dataloader, eval_dataloader
92
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , ) lowerCAmelCase = parser.parse_args() return args def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): if not len(_UpperCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) lowerCAmelCase ,lowerCAmelCase = imgs[0].size lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) ) lowerCAmelCase ,lowerCAmelCase = grid.size for i, img in enumerate(_UpperCAmelCase ): grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ): lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase ) lowerCAmelCase = pipeline( _UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCamelCase : Optional[Any] = parse_args() # Load models and create wrapper for stable diffusion __UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id)) __UpperCamelCase : Optional[Any] = pipeline.to(unet.device) __UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
4
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Optional[int] = """switch_transformers""" __magic_name__ :Optional[Any] = ["""past_key_values"""] __magic_name__ :str = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , __UpperCAmelCase=3_2_1_2_8 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=6_4 , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=6_4 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_2 , __UpperCAmelCase=8 , __UpperCAmelCase=False , __UpperCAmelCase=0.01 , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=3_2 , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=0.0_01 , __UpperCAmelCase=0.0_01 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = vocab_size lowerCAmelCase__ :int = d_model lowerCAmelCase__ :Union[str, Any] = d_kv lowerCAmelCase__ :Dict = d_ff lowerCAmelCase__ :List[str] = num_sparse_encoder_layers lowerCAmelCase__ :Union[str, Any] = num_layers lowerCAmelCase__ :int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase__ :List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase__ :Optional[Any] = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase__ :Optional[int] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase__ :List[str] = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase__ :Optional[int] = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase__ :List[str] = num_heads lowerCAmelCase__ :Dict = num_experts lowerCAmelCase__ :Optional[Any] = expert_capacity lowerCAmelCase__ :Union[str, Any] = router_bias lowerCAmelCase__ :Optional[Any] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) lowerCAmelCase__ :str = router_dtype lowerCAmelCase__ :int = router_ignore_padding_tokens lowerCAmelCase__ :Dict = relative_attention_num_buckets lowerCAmelCase__ :Dict = relative_attention_max_distance lowerCAmelCase__ :str = dropout_rate lowerCAmelCase__ :List[Any] = layer_norm_epsilon lowerCAmelCase__ :List[str] = initializer_factor lowerCAmelCase__ :List[Any] = feed_forward_proj lowerCAmelCase__ :str = use_cache lowerCAmelCase__ :Any = add_router_probs lowerCAmelCase__ :List[Any] = router_z_loss_coef lowerCAmelCase__ :List[str] = router_aux_loss_coef lowerCAmelCase__ :Union[str, Any] = self.feed_forward_proj.split('-' ) lowerCAmelCase__ :Dict = act_info[-1] lowerCAmelCase__ :str = act_info[0] == 'gated' if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2: raise ValueError( F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase__ :Tuple = 'gelu_new' super().__init__( pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
93
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ): lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCamelCase : Optional[Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCamelCase : int = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ): try: lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(_UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ): lowerCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(_UpperCAmelCase , _UpperCAmelCase ): AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval() else: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}' lowerCAmelCase = teacher.config.to_diff_dict() try: lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_UpperCAmelCase ) # Copy weights lowerCAmelCase = teacher.config_class(**_UpperCAmelCase ) lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(_UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) if d_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) try: if hasattr( _UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) lowerCAmelCase = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(_UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
4
0
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=__A ) class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCamelCase_ = Features({'''text''': Value('''string''' )} ) UpperCamelCase_ = Features({'''summary''': Value('''string''' )} ) UpperCamelCase_ = "text" UpperCamelCase_ = "summary" @property def A__ ( self : int ) -> Dict[str, str]: '''simple docstring''' return {self.text_column: "text", self.summary_column: "summary"}
94
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
0
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''snap-research/efficientformer-l1-300''': ( '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json''' ), } class UpperCamelCase_ (__A ): __magic_name__ = '''efficientformer''' def __init__( self : int , lowerCAmelCase_ : List[int] = [3, 2, 6, 4] , lowerCAmelCase_ : List[int] = [48, 96, 224, 448] , lowerCAmelCase_ : List[bool] = [True, True, True, True] , lowerCAmelCase_ : int = 448 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 7 , lowerCAmelCase_ : int = 5 , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 16 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : float = 1e-5 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 1e-12 , lowerCAmelCase_ : int = 224 , lowerCAmelCase_ : float = 1e-05 , **lowerCAmelCase_ : List[str] , ) -> None: super().__init__(**lowerCAmelCase_ ) UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Any = hidden_sizes UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Any = layer_norm_eps UpperCAmelCase_ : Any = patch_size UpperCAmelCase_ : Optional[int] = num_channels UpperCAmelCase_ : Any = depths UpperCAmelCase_ : Optional[Any] = mlp_expansion_ratio UpperCAmelCase_ : Tuple = downsamples UpperCAmelCase_ : List[Any] = dim UpperCAmelCase_ : Tuple = key_dim UpperCAmelCase_ : Tuple = attention_ratio UpperCAmelCase_ : Any = resolution UpperCAmelCase_ : Any = pool_size UpperCAmelCase_ : str = downsample_patch_size UpperCAmelCase_ : Optional[Any] = downsample_stride UpperCAmelCase_ : Tuple = downsample_pad UpperCAmelCase_ : Optional[Any] = drop_path_rate UpperCAmelCase_ : List[str] = num_metaad_blocks UpperCAmelCase_ : Optional[Any] = distillation UpperCAmelCase_ : Optional[Any] = use_layer_scale UpperCAmelCase_ : Dict = layer_scale_init_value UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Optional[Any] = batch_norm_eps
95
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: if resistor <= 0: lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!' raise ValueError(_UpperCAmelCase ) first_sum += 1 / float(_UpperCAmelCase ) index += 1 return 1 / first_sum def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCAmelCase = F'Resistor at index {index} has a negative value!' raise ValueError(_UpperCAmelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
4
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class __A ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ): UpperCAmelCase__ = GPTSwaTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True UpperCAmelCase__ = False def lowerCamelCase__ ( self : int ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing __magic_name__: Any = GPTSwaTokenizer(__snake_case , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : List[Any] , __snake_case : List[Any] ) -> Optional[int]: __magic_name__: Tuple = """This is a test""" __magic_name__: Dict = """This is a test""" return input_text, output_text def lowerCamelCase__ ( self : Dict ) -> str: __magic_name__: Union[str, Any] = """<s>""" __magic_name__: Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case ) def lowerCamelCase__ ( self : Tuple ) -> Optional[int]: __magic_name__: Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(__snake_case ) , 2_0_0_0 ) def lowerCamelCase__ ( self : Any ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 ) def lowerCamelCase__ ( self : List[str] ) -> List[str]: __magic_name__: List[Any] = GPTSwaTokenizer(__snake_case ) __magic_name__: str = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] ) __magic_name__: Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( __snake_case , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , ) # fmt: on __magic_name__: List[Any] = tokenizer.convert_tokens_to_ids(__snake_case ) self.assertListEqual( __snake_case , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , ) __magic_name__: Tuple = tokenizer.convert_ids_to_tokens(__snake_case ) # fmt: off self.assertListEqual( __snake_case , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]: __magic_name__: Tuple = GPTSwaTokenizer(__snake_case ) __magic_name__: Any = ["""This is a test""", """I was born in 92000, and this is falsé."""] __magic_name__: Dict = [ [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2], [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__snake_case , __snake_case ): self.assertListEqual(tokenizer.encode_fast(__snake_case ) , __snake_case ) # Test that decode_fast returns the input text for text, token_ids in zip(__snake_case , __snake_case ): self.assertEqual(tokenizer.decode_fast(__snake_case ) , __snake_case ) @slow def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]: __magic_name__: Tuple = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off __magic_name__: str = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=__snake_case , )
96
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class a ( a__ ): snake_case__ = '''glpn''' def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = num_channels lowerCAmelCase = num_encoder_blocks lowerCAmelCase = depths lowerCAmelCase = sr_ratios lowerCAmelCase = hidden_sizes lowerCAmelCase = patch_sizes lowerCAmelCase = strides lowerCAmelCase = mlp_ratios lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = drop_path_rate lowerCAmelCase = layer_norm_eps lowerCAmelCase = decoder_hidden_size lowerCAmelCase = max_depth lowerCAmelCase = head_in_index
4
0
from __future__ import annotations from collections.abc import Iterator from typing import Any class lowercase__: """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> str: lowercase_ = data lowercase_ = None class lowercase__: """simple docstring""" def __init__( self : Any ) -> str: lowercase_ = None lowercase_ = None def __iter__( self : str ) -> Iterator[Any]: lowercase_ = self.head while self.head: yield node.data lowercase_ = node.next if node == self.head: break def __len__( self : Tuple ) -> int: return sum(1 for _ in self ) def __repr__( self : Tuple ) -> List[str]: return "->".join(str(SCREAMING_SNAKE_CASE_ ) for item in iter(self ) ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> None: self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Any ) -> None: self.insert_nth(0 , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> None: if index < 0 or index > len(self ): raise IndexError('''list index out of range.''' ) lowercase_ = Node(SCREAMING_SNAKE_CASE_ ) if self.head is None: lowercase_ = new_node # first node points itself lowercase_ = lowercase_ = new_node elif index == 0: # insert at head lowercase_ = self.head lowercase_ = lowercase_ = new_node else: lowercase_ = self.head for _ in range(index - 1 ): lowercase_ = temp.next lowercase_ = temp.next lowercase_ = new_node if index == len(self ) - 1: # insert at tail lowercase_ = new_node def _lowercase ( self : List[Any] ) -> Dict: return self.delete_nth(0 ) def _lowercase ( self : Any ) -> Any: return self.delete_nth(len(self ) - 1 ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int = 0 ) -> Any: if not 0 <= index < len(self ): raise IndexError('''list index out of range.''' ) lowercase_ = self.head if self.head == self.tail: # just one node lowercase_ = lowercase_ = None elif index == 0: # delete head node lowercase_ = self.tail.next.next lowercase_ = self.head.next else: lowercase_ = self.head for _ in range(index - 1 ): lowercase_ = temp.next lowercase_ = temp.next lowercase_ = temp.next.next if index == len(self ) - 1: # delete at tail lowercase_ = temp return delete_node.data def _lowercase ( self : List[Any] ) -> bool: return len(self ) == 0 def a ( ): '''simple docstring''' lowercase_ = CircularLinkedList() assert len(snake_case__ ) == 0 assert circular_linked_list.is_empty() is True assert str(snake_case__ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(snake_case__ ) == i circular_linked_list.insert_nth(snake_case__ , i + 1 ) assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
97
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = range_bbox def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase = bbox[i, j, 3] lowerCAmelCase = bbox[i, j, 1] lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase = bbox[i, j, 2] lowerCAmelCase = bbox[i, j, 0] lowerCAmelCase = t lowerCAmelCase = tf.convert_to_tensor(_snake_case ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = True snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def _SCREAMING_SNAKE_CASE (): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCAmelCase = outputs.loss lowerCAmelCase = (2,) self.assertEqual(loss.shape , _snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = (2, 2) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _snake_case ) self.assertEqual(outputs.end_logits.shape , _snake_case )
4
0
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def a__ ( lowercase : Dict, lowercase : str, lowercase : str, lowercase : Path, lowercase : str = None, lowercase : str = None, lowercase : str = None, ) -> str: """simple docstring""" if config_name_or_path is None: _UpperCamelCase = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base''' if generator_tokenizer_name_or_path is None: _UpperCamelCase = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: _UpperCamelCase = question_encoder_name_or_path _UpperCamelCase = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration # Save model. _UpperCamelCase = RagConfig.from_pretrained(lowercase ) _UpperCamelCase = AutoConfig.from_pretrained(lowercase ) _UpperCamelCase = AutoConfig.from_pretrained(lowercase ) _UpperCamelCase = gen_config _UpperCamelCase = question_encoder_config _UpperCamelCase = model_class.from_pretrained_question_encoder_generator( lowercase, lowercase, config=lowercase ) rag_model.save_pretrained(lowercase ) # Sanity check. model_class.from_pretrained(lowercase ) # Save tokenizers. _UpperCamelCase = AutoTokenizer.from_pretrained(lowercase ) gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' ) _UpperCamelCase = AutoTokenizer.from_pretrained(lowercase ) question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' ) if __name__ == "__main__": lowercase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) lowercase__ : Optional[int] = parser.parse_args() lowercase__ : Union[str, Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
98
"""simple docstring""" import argparse import os import re import packaging.version __UpperCamelCase : Union[str, Any] = '''examples/''' __UpperCamelCase : str = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __UpperCamelCase : List[str] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __UpperCamelCase : Optional[int] = '''README.md''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.read() lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern] lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase ) lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '🤗 Transformers currently provides the following architectures' lowerCAmelCase = '1. Want to contribute a new model?' with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.readlines() # Find the start of the list. lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowerCAmelCase = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): with open(REPLACE_FILES['init'] , 'r' ) as f: lowerCAmelCase = f.read() lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ): lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: lowerCAmelCase = default_version.base_version elif patch: lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = default_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = get_version() lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0' lowerCAmelCase = current_version.base_version # Check with the user we got that right. lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = dev_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __UpperCamelCase : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
4
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __UpperCAmelCase : """simple docstring""" def __init__( self , __A , __A=2 , __A=True , __A=False , __A=10 , __A=3 , __A=32 * 8 , __A=32 * 8 , __A=4 , __A=64 , ): __a = parent __a = batch_size __a = is_training __a = use_auxiliary_loss __a = num_queries __a = num_channels __a = min_size __a = max_size __a = num_labels __a = hidden_dim __a = hidden_dim def snake_case_ ( self ): __a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) __a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) __a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() __a = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() __a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def snake_case_ ( self ): __a = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __a = self.num_queries __a = self.num_labels __a = [1, 1, 1, 1] __a = self.num_channels __a = 64 __a = 128 __a = self.hidden_dim __a = self.hidden_dim __a = self.hidden_dim return config def snake_case_ ( self ): __a , __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def snake_case_ ( self , __A , __A ): __a = output.encoder_hidden_states __a = output.pixel_decoder_hidden_states __a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_layers ) def snake_case_ ( self , __A , __A , __A , __A=False ): with torch.no_grad(): __a = MaskaFormerModel(config=__A ) model.to(__A ) model.eval() __a = model(pixel_values=__A , pixel_mask=__A ) __a = model(__A , output_hidden_states=__A ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__A , __A ) def snake_case_ ( self , __A , __A , __A , __A , __A ): __a = MaskaFormerForUniversalSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __a = model(pixel_values=__A , pixel_mask=__A ) __a = model(__A ) comm_check_on_output(__A ) __a = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __UpperCAmelCase ( __A , __A , unittest.TestCase ): """simple docstring""" _lowerCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowerCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case_ ( self ): __a = MaskaFormerModelTester(self ) __a = ConfigTester(self , config_class=__A , has_text_modality=__A ) def snake_case_ ( self ): self.config_tester.run_common_tests() def snake_case_ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__A , **__A , output_hidden_states=__A ) def snake_case_ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def snake_case_ ( self ): pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def snake_case_ ( self ): pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def snake_case_ ( self ): pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def snake_case_ ( self ): pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def snake_case_ ( self ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def snake_case_ ( self ): pass def snake_case_ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__A ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __A ) @slow def snake_case_ ( self ): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __a = MaskaFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def snake_case_ ( self ): __a = (self.model_tester.min_size,) * 2 __a = { """pixel_values""": torch.randn((2, 3, *size) , device=__A ), """mask_labels""": torch.randn((2, 10, *size) , device=__A ), """class_labels""": torch.zeros(2 , 10 , device=__A ).long(), } __a = self.model_tester.get_config() __a = MaskaFormerForUniversalSegmentation(__A ).to(__A ) __a = model(**__A ) self.assertTrue(outputs.loss is not None ) def snake_case_ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(__A , **__A , output_hidden_states=__A ) def snake_case_ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__A ).to(__A ) __a = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def snake_case_ ( self ): if not self.model_tester.is_training: return __a = self.all_model_classes[1] __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = model_class(__A ) model.to(__A ) model.train() __a = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def snake_case_ ( self ): __a = self.all_model_classes[1] __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = True __a = True __a = model_class(__A ).to(__A ) model.train() __a = model(__A , mask_labels=__A , class_labels=__A ) __a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __a = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) SCREAMING_SNAKE_CASE = 1E-4 def a (): __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case_ ( self ): return "facebook/mask2former-swin-small-coco-instance" @cached_property def snake_case_ ( self ): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def snake_case_ ( self ): __a = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__A ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(__A , return_tensors="""pt""" ).to(__A ) __a = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 384, 384) ) with torch.no_grad(): __a = model(**__A ) __a = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) __a = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) __a = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def snake_case_ ( self ): __a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__A ).eval() __a = self.default_image_processor __a = prepare_img() __a = image_processor(__A , return_tensors="""pt""" ).to(__A ) __a = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 384, 384) ) with torch.no_grad(): __a = model(**__A ) # masks_queries_logits __a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __a = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] __a = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits __a = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __a = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def snake_case_ ( self ): __a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__A ).eval() __a = self.default_image_processor __a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) __a = inputs["""pixel_values"""].to(__A ) __a = [el.to(__A ) for el in inputs["""mask_labels"""]] __a = [el.to(__A ) for el in inputs["""class_labels"""]] with torch.no_grad(): __a = model(**__A ) self.assertTrue(outputs.loss is not None )
99
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __UpperCamelCase : Optional[int] = pytest.mark.integration @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} ) return dset def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() lowerCAmelCase = dset.map( lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case ) lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch lowerCAmelCase = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} lowerCAmelCase = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=_snake_case ) lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1] lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) self.assertRaises(_snake_case , index.search_batch , queries[0] ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCAmelCase = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_snake_case ): lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = faiss.IndexFlat(5 ) lowerCAmelCase = FaissIndex(custom_index=_snake_case ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def UpperCamelCase__ ( self ): """simple docstring""" import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file: index.save(tmp_file.name ) lowerCAmelCase = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowerCAmelCase = 'index.faiss' lowerCAmelCase = F'mock://{index_name}' index.save(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: lowerCAmelCase = Elasticsearch() lowerCAmelCase = {'acknowledged': True} lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCAmelCase = 'foo' lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case ) # batched queries with timeout lowerCAmelCase = ['foo', 'bar', 'foobar'] lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(_snake_case ) , 0 ) self.assertListEqual([1, 1, 1] , _snake_case )
4
0
def __snake_case ( lowerCAmelCase_ = 1_0_0 ) -> int: SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F'{solution() = }')
100
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a ( a__ , a__ , unittest.TestCase ): snake_case__ = IFInpaintingPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_dummy_components() def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCamelCase__ ( self ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
4
0
from collections import Counter from timeit import timeit def a__ ( A__ = "", ): return sum(c % 2 for c in Counter(input_str.replace(' ', '' ).lower() ).values() ) < 2 def a__ ( A__ = "" ): if len(A__ ) == 0: return True SCREAMING_SNAKE_CASE_ : Optional[int] = input_str.replace(' ', '' ).lower() # character_freq_dict: Stores the frequency of every character in the input string SCREAMING_SNAKE_CASE_ : dict[str, int] = {} for character in lower_case_input_str: SCREAMING_SNAKE_CASE_ : Optional[Any] = character_freq_dict.get(A__, 0 ) + 1 SCREAMING_SNAKE_CASE_ : str = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def a__ ( A__ = "" ): print('\nFor string = ', A__, ':' ) print( '> can_string_be_rearranged_as_palindrome_counter()', '\tans =', can_string_be_rearranged_as_palindrome_counter(A__ ), '\ttime =', timeit( 'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)', setup='import __main__ as z', ), 'seconds', ) print( '> can_string_be_rearranged_as_palindrome()', '\tans =', can_string_be_rearranged_as_palindrome(A__ ), '\ttime =', timeit( 'z.can_string_be_rearranged_as_palindrome(z.check_str)', setup='import __main__ as z', ), 'seconds', ) if __name__ == "__main__": lowerCAmelCase__ : int =input( 'Enter string to determine if it can be rearranged as a palindrome or not: ' ).strip() benchmark(check_str) lowerCAmelCase__ : List[Any] =can_string_be_rearranged_as_palindrome_counter(check_str) print(F"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
101
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = self.vocab_size - 1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = inputs_dict['labels'] lowerCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , ) lowerCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_snake_case ) return inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @require_torch class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(_snake_case ) lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=_snake_case ) # the president is lowerCAmelCase = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case ) self.assertListEqual(output_ids[0].tolist() , _snake_case )
4
0
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file __magic_name__ : Any = """Run commands across TPU VMs for initial setup before running `accelerate launch`.""" def UpperCamelCase (SCREAMING_SNAKE_CASE=None ): if subparsers is not None: UpperCamelCase : Optional[Any] = subparsers.add_parser("""tpu-config""" , description=_description ) else: UpperCamelCase : Optional[Any] = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments UpperCamelCase : Any = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=SCREAMING_SNAKE_CASE , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=SCREAMING_SNAKE_CASE , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) UpperCamelCase : int = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=SCREAMING_SNAKE_CASE , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=SCREAMING_SNAKE_CASE ) return parser def UpperCamelCase (SCREAMING_SNAKE_CASE ): UpperCamelCase : Any = None # Get the default from the config file if it exists. if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: UpperCamelCase : Tuple = defaults.command_file if not args.command and defaults.commands is not None: UpperCamelCase : Optional[int] = defaults.commands if not args.tpu_name: UpperCamelCase : Dict = defaults.tpu_name if not args.tpu_zone: UpperCamelCase : Optional[Any] = defaults.tpu_zone if args.accelerate_version == "dev": UpperCamelCase : int = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": UpperCamelCase : Dict = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ): UpperCamelCase : int = f"""accelerate=={args.accelerate_version}""" if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: UpperCamelCase : Optional[Any] = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate UpperCamelCase : Optional[int] = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [f"""pip install {args.accelerate_version}"""] new_cmd += args.command UpperCamelCase : Tuple = """; """.join(SCREAMING_SNAKE_CASE ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess UpperCamelCase : int = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(f"""Running {" ".join(SCREAMING_SNAKE_CASE )}""" ) return subprocess.run(SCREAMING_SNAKE_CASE ) print("""Successfully setup pod.""" ) def UpperCamelCase (): UpperCamelCase : Any = tpu_command_parser() UpperCamelCase : int = parser.parse_args() tpu_command_launcher(SCREAMING_SNAKE_CASE )
102
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __UpperCamelCase : str = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' ) lowerCAmelCase = parser.parse_args() logger.info(F'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>` lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(F'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: lowerCAmelCase = fp.readlines() logger.info('Start encoding' ) logger.info(F'{len(_UpperCAmelCase )} examples to process.' ) lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 1_0000 lowerCAmelCase = time.time() for text in data: lowerCAmelCase = F'{bos} {text.strip()} {sep}' lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) rslt.append(_UpperCAmelCase ) iter += 1 if iter % interval == 0: lowerCAmelCase = time.time() logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) lowerCAmelCase = time.time() logger.info('Finished binarization' ) logger.info(F'{len(_UpperCAmelCase )} examples processed.' ) lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle' lowerCAmelCase = tokenizer.vocab_size if vocab_size < (1 << 16): lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt] else: lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'Dump to {dp_file}' ) with open(_UpperCAmelCase , 'wb' ) as handle: pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
4
0
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case = logging.get_logger(__name__) snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} snake_case = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } snake_case = { '''allenai/longformer-base-4096''': 4_0_9_6, '''allenai/longformer-large-4096''': 4_0_9_6, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def snake_case ( ) -> Tuple: _snake_case = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCAmelCase_ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(lowerCAmelCase_ ) for n in cs] return dict(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) def snake_case ( lowerCAmelCase_ ) -> Optional[Any]: _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): A__ : Dict = VOCAB_FILES_NAMES A__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]="replace" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : str="<s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Any="<pad>" , __lowerCamelCase : Optional[Any]="<mask>" , __lowerCamelCase : List[str]=False , **__lowerCamelCase : Optional[Any] , ): """simple docstring""" _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it _snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle: _snake_case = json.load(__lowerCamelCase ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle: _snake_case = merges_handle.read().split('''\n''' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" return len(self.encoder ) def __UpperCAmelCase ( self : int ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ): """simple docstring""" if token in self.cache: return self.cache[token] _snake_case = tuple(__lowerCamelCase ) _snake_case = get_pairs(__lowerCamelCase ) if not pairs: return token while True: _snake_case = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(__lowerCamelCase ): try: _snake_case = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(__lowerCamelCase ) _snake_case = new_word if len(__lowerCamelCase ) == 1: break else: _snake_case = get_pairs(__lowerCamelCase ) _snake_case = ''' '''.join(__lowerCamelCase ) _snake_case = word return word def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int ): """simple docstring""" _snake_case = [] for token in re.findall(self.pat , __lowerCamelCase ): _snake_case = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Dict ): """simple docstring""" return self.decoder.get(__lowerCamelCase ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] ): """simple docstring""" _snake_case = ''''''.join(__lowerCamelCase ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _snake_case = os.path.join( __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join( __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + '''\n''' ) _snake_case = 0 with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) _snake_case = token_index writer.write(''' '''.join(__lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Dict ): """simple docstring""" _snake_case = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): _snake_case = ''' ''' + text return (text, kwargs)
103
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''bert''' def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = classifier_dropout class a ( a__ ): @property def UpperCamelCase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
4
0
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase_ : Optional[int] ) -> str: """simple docstring""" A__ = [0] * len(UpperCAmelCase_ ) A__ = [] A__ = [1] * len(UpperCAmelCase_ ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(UpperCAmelCase_ ) ): if indegree[i] == 0: queue.append(UpperCAmelCase_ ) while queue: A__ = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: A__ = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(UpperCAmelCase_ ) print(max(UpperCAmelCase_ ) ) # Adjacency list of Graph UpperCamelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
104
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a ( a__ , unittest.TestCase ): snake_case__ = DanceDiffusionPipeline snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) lowerCAmelCase = IPNDMScheduler() lowerCAmelCase = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = DanceDiffusionPipeline(**_snake_case ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe(**_snake_case ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_local() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_save_load_optional_components() @skip_mps def UpperCamelCase__ ( self ): """simple docstring""" return super().test_attention_slicing_forward_pass() def UpperCamelCase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) lowerCAmelCase = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
4
0
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : int = TypeVar('''DatasetType''', Dataset, IterableDataset) def __UpperCAmelCase ( lowerCamelCase_ : List[DatasetType] , lowerCamelCase_ : Optional[List[float]] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(lowerCamelCase_ ): if not isinstance(lowerCamelCase_ , (Dataset, IterableDataset) ): if isinstance(lowerCamelCase_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( F'Dataset at position {i} has at least one split: {list(lowerCamelCase_ )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCamelCase_ ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase_ ).__name__}.' ) if i == 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = ( (Dataset, IterableDataset) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else (IterableDataset, Dataset) ) elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , stopping_strategy=lowerCamelCase_ ) else: return _interleave_iterable_datasets( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , stopping_strategy=lowerCamelCase_ ) def __UpperCAmelCase ( lowerCamelCase_ : List[DatasetType] , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(lowerCamelCase_ ): if not isinstance(lowerCamelCase_ , (Dataset, IterableDataset) ): if isinstance(lowerCamelCase_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( F'Dataset at position {i} has at least one split: {list(lowerCamelCase_ )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCamelCase_ ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase_ ).__name__}.' ) if i == 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = ( (Dataset, IterableDataset) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else (IterableDataset, Dataset) ) elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , axis=lowerCamelCase_ ) else: return _concatenate_iterable_datasets(lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , axis=lowerCamelCase_ )
105
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , ) lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenLlamaModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'single_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'multi_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowerCAmelCase = original_model(_snake_case ).last_hidden_state lowerCAmelCase = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
4
0
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCAmelCase__ : def __init__( self : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : List[str]=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : int=False , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=False , __UpperCamelCase : List[str]=False , __UpperCamelCase : Optional[int]=19 , __UpperCamelCase : Any=32 , __UpperCamelCase : Union[str, Any]=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Tuple=37 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Tuple=512 , __UpperCamelCase : str=16 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : int=3 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[int]=None , ) -> Optional[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def __UpperCamelCase ( self : Any ) -> str: A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A = ids_tensor([self.batch_size] , self.num_choices ) A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: A = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__UpperCamelCase , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple ) -> Optional[int]: A = EsmForProteinFolding(config=__UpperCamelCase ).float() model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) A = model(__UpperCamelCase ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def __UpperCamelCase ( self : Any ) -> Dict: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): A_ : Dict = False A_ : List[Any] = (EsmForProteinFolding,) if is_torch_available() else () A_ : Tuple = () A_ : List[Any] = {} if is_torch_available() else {} A_ : str = False def __UpperCamelCase ( self : List[Any] ) -> str: A = EsmFoldModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: self.config_tester.run_common_tests() def __UpperCamelCase ( self : List[str] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) @unittest.skip('Does not support attention outputs' ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip def __UpperCamelCase ( self : Dict ) -> List[str]: pass @unittest.skip('Esm does not support embedding resizing' ) def __UpperCamelCase ( self : List[str] ) -> Any: pass @unittest.skip('Esm does not support embedding resizing' ) def __UpperCamelCase ( self : List[str] ) -> Optional[int]: pass @unittest.skip('ESMFold does not support passing input embeds!' ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def __UpperCamelCase ( self : Tuple ) -> Tuple: pass @unittest.skip('ESMFold does not support head pruning.' ) def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def __UpperCamelCase ( self : Any ) -> Optional[int]: pass @unittest.skip('ESMFold only has one output format.' ) def __UpperCamelCase ( self : Optional[int] ) -> Any: pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def __UpperCamelCase ( self : Tuple ) -> Tuple: pass @unittest.skip('ESMFold does not support input chunking.' ) def __UpperCamelCase ( self : Any ) -> Dict: pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def __UpperCamelCase ( self : Optional[Any] ) -> Any: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def __UpperCamelCase ( self : Optional[Any] ) -> Dict: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def __UpperCamelCase ( self : str ) -> List[Any]: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def __UpperCamelCase ( self : str ) -> Optional[Any]: pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def __UpperCamelCase ( self : List[Any] ) -> Any: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __UpperCamelCase ( self : List[Any] ) -> int: pass @require_torch class lowerCAmelCase__ ( _lowerCamelCase ): @slow def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: A = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() A = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A = model(__UpperCamelCase )['positions'] A = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __UpperCamelCase , atol=1e-4 ) )
106
"""simple docstring""" from typing import Any class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = data lowerCAmelCase = None def __repr__( self ): """simple docstring""" return F'Node({self.data})' class a : def __init__( self ): """simple docstring""" lowerCAmelCase = None def __iter__( self ): """simple docstring""" lowerCAmelCase = self.head while node: yield node.data lowerCAmelCase = node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(_snake_case ) for item in self] ) def __getitem__( self , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) lowerCAmelCase = self.head for _ in range(_snake_case ): lowerCAmelCase = current.next lowerCAmelCase = data def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(len(self ) , _snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.insert_nth(0 , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) lowerCAmelCase = Node(_snake_case ) if self.head is None: lowerCAmelCase = new_node elif index == 0: lowerCAmelCase = self.head # link new_node to head lowerCAmelCase = new_node else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = new_node def UpperCamelCase__ ( self ): # print every node data """simple docstring""" print(self ) def UpperCamelCase__ ( self ): """simple docstring""" return self.delete_nth(0 ) def UpperCamelCase__ ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def UpperCamelCase__ ( self , _snake_case = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) lowerCAmelCase = self.head # default first node if index == 0: lowerCAmelCase = self.head.next else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = temp.next.next return delete_node.data def UpperCamelCase__ ( self ): """simple docstring""" return self.head is None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = None lowerCAmelCase = self.head while current: # Store the current node's next node. lowerCAmelCase = current.next # Make the current node's next point backwards lowerCAmelCase = prev # Make the previous node be the current node lowerCAmelCase = current # Make the current node the next node (to progress iteration) lowerCAmelCase = next_node # Return prev in order to put the head at the end lowerCAmelCase = prev def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = LinkedList() assert linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_UpperCAmelCase ) == i linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_UpperCAmelCase ) == 9 assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [ -9, 100, Node(7734_5112 ), 'dlrow olleH', 7, 5555, 0, -192.5_5555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] lowerCAmelCase = LinkedList() for i in test_input: linked_list.insert_tail(_UpperCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase = linked_list.delete_head() assert result == -9 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' ) ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_UpperCAmelCase ) assert ( str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_UpperCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _SCREAMING_SNAKE_CASE (): from doctest import testmod testmod() lowerCAmelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(_UpperCAmelCase ) print('\nReading/changing Node data using indexing:' ) print(F'Element at Position 1: {linked_list[1]}' ) lowerCAmelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(_UpperCAmelCase ) print(F'length of linked_list is : {len(_UpperCAmelCase )}' ) if __name__ == "__main__": main()
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase : str = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : List[str] = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys _UpperCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
107
"""simple docstring""" from __future__ import annotations import requests def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(_UpperCAmelCase ).json() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories] return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ): lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase ) return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
4
0
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Tuple: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _UpperCAmelCase = flax_key_tuple[:-1] + ("""weight""",) _UpperCAmelCase = torch.permute(__snake_case , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ): # linear layer _UpperCAmelCase = flax_key_tuple[:-1] + ("""weight""",) _UpperCAmelCase = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _UpperCAmelCase = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> List[str]: if "metadata" in layer: _UpperCAmelCase = layer.split("""metadata""" ) _UpperCAmelCase = """""".join(split_layer[0] )[:-1] _UpperCAmelCase = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: _UpperCAmelCase = layer.split("""kvstore""" ) _UpperCAmelCase = """""".join(split_layer[0] )[:-1] _UpperCAmelCase = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: _UpperCAmelCase = layer.split("""/""" ) _UpperCAmelCase = """/""".join(split_layer[:-1] ) _UpperCAmelCase = (split_layer[-1],) if "kvstore/path" in layer: _UpperCAmelCase = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: _UpperCAmelCase = """file""" else: _UpperCAmelCase = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Optional[Any]: _UpperCAmelCase = rename_keys(__snake_case ) _UpperCAmelCase = {} for k, v in current_block.items(): _UpperCAmelCase = v _UpperCAmelCase = new_current_block torch.save(__snake_case , __snake_case ) def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = WEIGHTS_NAME ) -> Any: _UpperCAmelCase = convert_file_size_to_int(__snake_case ) _UpperCAmelCase = [] _UpperCAmelCase = {} _UpperCAmelCase = 0 _UpperCAmelCase = 0 os.makedirs(__snake_case , exist_ok=__snake_case ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: _UpperCAmelCase = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] _UpperCAmelCase = flatten_dict(__snake_case , sep="""/""" ) _UpperCAmelCase = {} for layer in checkpoint_info.keys(): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_key_and_tensorstore_dict( __snake_case , __snake_case , __snake_case ) if curr_real_layer_name in all_layers: _UpperCAmelCase = content else: _UpperCAmelCase = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _UpperCAmelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _UpperCAmelCase = torch.tensor(__snake_case ) _UpperCAmelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _UpperCAmelCase , _UpperCAmelCase = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __snake_case ) _UpperCAmelCase = """/""".join(__snake_case ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: _UpperCAmelCase = os.path.join( __snake_case , weights_name.replace(""".bin""" , f"""-{len(__snake_case )+1:05d}-of-???.bin""" ) ) rename_and_save_block(__snake_case , __snake_case ) sharded_state_dicts.append(current_block.keys() ) del current_block _UpperCAmelCase = {} _UpperCAmelCase = 0 _UpperCAmelCase = raw_weights.to(getattr(__snake_case , __snake_case ) ) current_block_size += weight_size total_size += weight_size # Add the last block _UpperCAmelCase = os.path.join(__snake_case , weights_name.replace(""".bin""" , f"""-{len(__snake_case )+1:05d}-of-???.bin""" ) ) rename_and_save_block(__snake_case , __snake_case ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__snake_case ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _UpperCAmelCase = {} _UpperCAmelCase = {} for idx, shard in enumerate(__snake_case ): _UpperCAmelCase = weights_name.replace( """.bin""" , f"""-{idx+1:05d}-of-{len(__snake_case ):05d}.bin""" ) # len(sharded_state_dicts):05d} _UpperCAmelCase = os.path.join(__snake_case , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) _UpperCAmelCase = shard for key in shard: _UpperCAmelCase = shard_file # Add the metadata _UpperCAmelCase = {"""total_size""": total_size} _UpperCAmelCase = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__snake_case , __snake_case ) , """w""" , encoding="""utf-8""" ) as f: _UpperCAmelCase = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + """\n""" f.write(__snake_case ) return metadata, index if __name__ == "__main__": __a: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''') parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''', type=str, required=False, help='''Path to the output pytorch model.''', ) __a: int = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _UpperCAmelCase = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) _UpperCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) _UpperCAmelCase = TaTokenizer.from_pretrained("""t5-small""" ) _UpperCAmelCase = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" _UpperCAmelCase = tokenizer(__snake_case , return_tensors="""pt""" ).input_ids _UpperCAmelCase = model.generate(__snake_case , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
108
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 48 lowerCAmelCase = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 60 lowerCAmelCase = [6, 6, 6, 6] lowerCAmelCase = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = 4 lowerCAmelCase = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = 126 lowerCAmelCase = 7 lowerCAmelCase = 255.0 lowerCAmelCase = '' return config def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ): if "patch_embed.proj" in name and "layers" not in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: lowerCAmelCase = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCAmelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCAmelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCAmelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: lowerCAmelCase = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: lowerCAmelCase = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: lowerCAmelCase = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": lowerCAmelCase = 'layernorm.weight' if name == "norm.bias": lowerCAmelCase = 'layernorm.bias' if "conv_first" in name: lowerCAmelCase = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCAmelCase = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' ) lowerCAmelCase = 'upsample.' + name elif config.upsampler == "pixelshuffledirect": lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: lowerCAmelCase = 'swin2sr.' + name return name def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ): for key in orig_state_dict.copy().keys(): lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: lowerCAmelCase = key.split('.' ) lowerCAmelCase = int(key_split[1] ) lowerCAmelCase = int(key_split[4] ) lowerCAmelCase = config.embed_dim if "weight" in key: lowerCAmelCase = val[:dim, :] lowerCAmelCase = val[dim : dim * 2, :] lowerCAmelCase = val[-dim:, :] else: lowerCAmelCase = val[:dim] lowerCAmelCase = val[dim : dim * 2] lowerCAmelCase = val[-dim:] pass else: lowerCAmelCase = val return orig_state_dict def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ): lowerCAmelCase = get_config(_UpperCAmelCase ) lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase ) model.eval() lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' ) lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) lowerCAmelCase = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256 lowerCAmelCase = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCAmelCase = model(_UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 512, 512] ) lowerCAmelCase = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase = torch.Size([1, 3, 1024, 1024] ) lowerCAmelCase = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 ) print('Looks ok!' ) lowerCAmelCase = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } lowerCAmelCase = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') __UpperCamelCase : Optional[int] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class __a ( _snake_case ): __UpperCamelCase : Optional[int] = 'fnet' def __init__( self : Dict ,lowerCamelCase : List[Any]=3_2000 ,lowerCamelCase : Dict=768 ,lowerCamelCase : Dict=12 ,lowerCamelCase : Tuple=3072 ,lowerCamelCase : Union[str, Any]="gelu_new" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Dict=0.02 ,lowerCamelCase : Optional[Any]=1E-1_2 ,lowerCamelCase : Optional[Any]=False ,lowerCamelCase : Dict=512 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Dict=2 ,**lowerCamelCase : List[str] ,): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = use_tpu_fourier_optimizations __SCREAMING_SNAKE_CASE = tpu_short_seq_length
109
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class a ( a__ ): snake_case__ = '''megatron-bert''' def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache
4
0
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class a ( lowercase , lowercase , lowercase ): @register_to_config def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False , ): super().__init__() UpperCAmelCase__ : Optional[int] = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase__ : Optional[Any] = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : str = nn.Dropout(p=UpperCamelCase_ ) UpperCAmelCase__ : List[str] = TaConfig( vocab_size=UpperCamelCase_ , d_model=UpperCamelCase_ , num_heads=UpperCamelCase_ , d_kv=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , feed_forward_proj=UpperCamelCase_ , is_decoder=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , ) UpperCAmelCase__ : Any = nn.ModuleList() for lyr_num in range(UpperCamelCase_ ): UpperCAmelCase__ : Tuple = TaBlock(UpperCamelCase_ ) self.encoders.append(UpperCamelCase_ ) UpperCAmelCase__ : Any = TaLayerNorm(UpperCamelCase_ ) UpperCAmelCase__ : int = nn.Dropout(p=UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : Dict = self.token_embedder(UpperCamelCase_ ) UpperCAmelCase__ : Dict = encoder_input_tokens.shape[1] UpperCAmelCase__ : List[Any] = torch.arange(UpperCamelCase_ , device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = self.dropout_pre(UpperCamelCase_ ) # inverted the attention mask UpperCAmelCase__ : int = encoder_input_tokens.size() UpperCAmelCase__ : Tuple = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ ) for lyr in self.encoders: UpperCAmelCase__ : Any = lyr(UpperCamelCase_ , UpperCamelCase_ )[0] UpperCAmelCase__ : List[str] = self.layer_norm(UpperCamelCase_ ) return self.dropout_post(UpperCamelCase_ ), encoder_inputs_mask
110
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
4
0
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCamelCase ( a__ , a__ , a__ ): lowercase : Optional[int] = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias'] @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 5_0257 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 768 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "gelu_new" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 1e-5 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , ): super().__init__() UpperCamelCase : List[Any] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and' f' `n_embd`: {n_embd} are not equal.' ) UpperCamelCase : int = prefix_inner_dim UpperCamelCase : Dict = prefix_hidden_dim UpperCamelCase : Union[str, Any] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) UpperCamelCase : Tuple = ( nn.Linear(self.prefix_hidden_dim , _snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) UpperCamelCase : Any = GPTaConfig( vocab_size=_snake_case , n_positions=_snake_case , n_embd=_snake_case , n_layer=_snake_case , n_head=_snake_case , n_inner=_snake_case , activation_function=_snake_case , resid_pdrop=_snake_case , embd_pdrop=_snake_case , attn_pdrop=_snake_case , layer_norm_epsilon=_snake_case , initializer_range=_snake_case , scale_attn_weights=_snake_case , use_cache=_snake_case , scale_attn_by_inverse_layer_idx=_snake_case , reorder_and_upcast_attn=_snake_case , ) UpperCamelCase : str = GPTaLMHeadModel(_snake_case ) def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase : Optional[int] = self.transformer.transformer.wte(_snake_case ) UpperCamelCase : List[str] = self.encode_prefix(_snake_case ) UpperCamelCase : Optional[Any] = self.decode_prefix(_snake_case ) UpperCamelCase : str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: UpperCamelCase : str = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) UpperCamelCase : List[str] = torch.cat((dummy_token, input_ids) , dim=1 ) UpperCamelCase : str = self.transformer(inputs_embeds=_snake_case , labels=_snake_case , attention_mask=_snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return torch.zeros(_snake_case , self.prefix_length , dtype=torch.intaa , device=_snake_case ) def a_ ( self , SCREAMING_SNAKE_CASE_ ): return self.encode_prefix(_snake_case ) @torch.no_grad() def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = torch.split(_snake_case , 1 , dim=0 ) UpperCamelCase : List[Any] = [] UpperCamelCase : Tuple = [] for feature in features: UpperCamelCase : Optional[int] = self.decode_prefix(feature.to(_snake_case ) ) # back to the clip feature # Only support beam search for now UpperCamelCase , UpperCamelCase : str = self.generate_beam( input_embeds=_snake_case , device=_snake_case , eos_token_id=_snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) UpperCamelCase : Dict = torch.stack(_snake_case ) UpperCamelCase : Dict = torch.stack(_snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def a_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = 5 , SCREAMING_SNAKE_CASE_ = 67 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase : Tuple = eos_token_id UpperCamelCase : int = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : Union[str, Any] = torch.ones(_snake_case , device=_snake_case , dtype=torch.int ) UpperCamelCase : Optional[Any] = torch.zeros(_snake_case , device=_snake_case , dtype=torch.bool ) if input_embeds is not None: UpperCamelCase : Tuple = input_embeds else: UpperCamelCase : Union[str, Any] = self.transformer.transformer.wte(_snake_case ) for i in range(_snake_case ): UpperCamelCase : Dict = self.transformer(inputs_embeds=_snake_case ) UpperCamelCase : Optional[int] = outputs.logits UpperCamelCase : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) UpperCamelCase : List[Any] = logits.softmax(-1 ).log() if scores is None: UpperCamelCase , UpperCamelCase : Optional[Any] = logits.topk(_snake_case , -1 ) UpperCamelCase : Dict = generated.expand(_snake_case , *generated.shape[1:] ) UpperCamelCase , UpperCamelCase : int = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: UpperCamelCase : Optional[int] = next_tokens else: UpperCamelCase : Any = tokens.expand(_snake_case , *tokens.shape[1:] ) UpperCamelCase : List[str] = torch.cat((tokens, next_tokens) , dim=1 ) else: UpperCamelCase : Union[str, Any] = -float(np.inf ) UpperCamelCase : List[Any] = 0 UpperCamelCase : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 UpperCamelCase : Tuple = scores_sum / seq_lengths[:, None] UpperCamelCase , UpperCamelCase : str = scores_sum_average.view(-1 ).topk(_snake_case , -1 ) UpperCamelCase : int = next_tokens // scores_sum.shape[1] UpperCamelCase : List[Any] = seq_lengths[next_tokens_source] UpperCamelCase : int = next_tokens % scores_sum.shape[1] UpperCamelCase : Optional[int] = next_tokens.unsqueeze(1 ) UpperCamelCase : Any = tokens[next_tokens_source] UpperCamelCase : int = torch.cat((tokens, next_tokens) , dim=1 ) UpperCamelCase : List[Any] = generated[next_tokens_source] UpperCamelCase : List[Any] = scores_sum_average * seq_lengths UpperCamelCase : int = is_stopped[next_tokens_source] UpperCamelCase : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) UpperCamelCase : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 ) UpperCamelCase : List[Any] = is_stopped + next_tokens.eq(_snake_case ).squeeze() if is_stopped.all(): break UpperCamelCase : Dict = scores / seq_lengths UpperCamelCase : str = scores.argsort(descending=_snake_case ) # tokens tensors are already padded to max_seq_length UpperCamelCase : Optional[int] = [tokens[i] for i in order] UpperCamelCase : int = torch.stack(_snake_case , dim=0 ) UpperCamelCase : int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
499
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a ( a__ ): snake_case__ = 42 class a ( a__ , a__ ): @register_to_config def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder lowerCAmelCase = Encoder( in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , ) lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case ) lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 ) # pass init params to Decoder lowerCAmelCase = Decoder( in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = self.encoder(_snake_case ) lowerCAmelCase = self.quant_conv(_snake_case ) if not return_dict: return (h,) return VQEncoderOutput(latents=_snake_case ) @apply_forward_hook def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ): """simple docstring""" if not force_not_quantize: lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case ) else: lowerCAmelCase = h lowerCAmelCase = self.post_quant_conv(_snake_case ) lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = True ): """simple docstring""" lowerCAmelCase = sample lowerCAmelCase = self.encode(_snake_case ).latents lowerCAmelCase = self.decode(_snake_case ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_snake_case )
4
0
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : str=13 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : int=True , lowerCAmelCase : str=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Union[str, Any]=99 , lowerCAmelCase : str=32 , lowerCAmelCase : int=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Any=37 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : List[str]=5_12 , lowerCAmelCase : str=16 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Tuple=4 , ) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = parent __lowerCAmelCase : int = batch_size __lowerCAmelCase : Optional[int] = seq_length __lowerCAmelCase : List[str] = is_training __lowerCAmelCase : Optional[int] = use_attention_mask __lowerCAmelCase : Optional[Any] = use_token_type_ids __lowerCAmelCase : Dict = use_labels __lowerCAmelCase : List[Any] = vocab_size __lowerCAmelCase : Any = hidden_size __lowerCAmelCase : Union[str, Any] = num_hidden_layers __lowerCAmelCase : int = num_attention_heads __lowerCAmelCase : List[str] = intermediate_size __lowerCAmelCase : List[str] = hidden_act __lowerCAmelCase : Optional[Any] = hidden_dropout_prob __lowerCAmelCase : List[str] = attention_probs_dropout_prob __lowerCAmelCase : str = max_position_embeddings __lowerCAmelCase : Dict = type_vocab_size __lowerCAmelCase : Optional[int] = type_sequence_label_size __lowerCAmelCase : Union[str, Any] = initializer_range __lowerCAmelCase : Any = num_choices def SCREAMING_SNAKE_CASE ( self : Any ) -> str: """simple docstring""" __lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Any = None if self.use_attention_mask: __lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Union[str, Any] = None if self.use_token_type_ids: __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Union[str, Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: """simple docstring""" __lowerCAmelCase : Any = self.prepare_config_and_inputs() __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Any = config_and_inputs __lowerCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowerCAmelCase : List[Any] = self.prepare_config_and_inputs() __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = config_and_inputs __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] =True lowerCamelCase : int =( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowerCAmelCase : Optional[int] = FlaxBertModelTester(self ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: """simple docstring""" __lowerCAmelCase : int = FlaxBertModel.from_pretrained("""bert-base-cased""" ) __lowerCAmelCase : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_snake_case )
651
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __UpperCamelCase : Optional[Any] = tuple[int, int] class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = vertices lowerCAmelCase = { (min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items() } def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCAmelCase = weight def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Graph({min(self.vertices )} , {} ) lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): lowerCAmelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCAmelCase = edge lowerCAmelCase = weight subgraph.add_edge(_snake_case , _snake_case ) return subgraph def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ): lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) ) lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = {} lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read().strip().split('\n' ) lowerCAmelCase = [line.split(',' ) for line in data] for edgea in range(1 , len(_UpperCAmelCase ) ): for edgea in range(_UpperCAmelCase ): if adjaceny_matrix[edgea][edgea] != "-": lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] ) lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase ) lowerCAmelCase = graph.prims_algorithm() lowerCAmelCase = sum(graph.edges.values() ) lowerCAmelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
4
0
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
412
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] ) lowerCAmelCase = np.array(_UpperCAmelCase ) lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = (1, 2, 1) lowerCAmelCase = (1, 1, 0, 7) lowerCAmelCase = SARIMAX( _UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase ) lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' ) lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] ) return result[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ): lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = regressor.predict(_UpperCAmelCase ) return y_pred[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): train_user.sort() lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 ) lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 ) lowerCAmelCase = qa - qa lowerCAmelCase = qa - (iqr * 0.1) return low_lim def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ): lowerCAmelCase = 0 lowerCAmelCase = 0 for i in list_vote: if i > actual_result: lowerCAmelCase = not_safe + 1 else: if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) __UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]] __UpperCamelCase : Any = pd.DataFrame( data_input, columns=['''total_user''', '''total_even''', '''days'''] ) __UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values) # split data __UpperCamelCase : Dict = normalize_df[:, 2].tolist() __UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist() __UpperCamelCase : List[str] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) __UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist() __UpperCamelCase : Tuple = x[: len(x) - 1] __UpperCamelCase : Any = x[len(x) - 1 :] # for linear regression & sarimax __UpperCamelCase : str = total_date[: len(total_date) - 1] __UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1] __UpperCamelCase : List[Any] = total_match[: len(total_match) - 1] __UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :] __UpperCamelCase : str = total_user[len(total_user) - 1 :] __UpperCamelCase : str = total_match[len(total_match) - 1 :] # voting system with forecasting __UpperCamelCase : Any = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data __UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not ''' print('''Today\'s data is {not_str}safe.''')
4
0
from __future__ import annotations class a : """simple docstring""" def __init__( self : Dict , __lowercase : int , __lowercase : Tuple ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase : int = text, pattern __UpperCAmelCase , __UpperCAmelCase : str = len(_snake_case ), len(_snake_case ) def UpperCAmelCase ( self : str , __lowercase : Any ) -> str: for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Union[str, Any] ) -> Optional[int]: for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def UpperCAmelCase ( self : int ) -> Any: __UpperCAmelCase : List[Any] = [] for i in range(self.textLen - self.patLen + 1 ): __UpperCAmelCase : Union[str, Any] = self.mismatch_in_text(_snake_case ) if mismatch_index == -1: positions.append(_snake_case ) else: __UpperCAmelCase : List[str] = self.match_in_pattern(self.text[mismatch_index] ) __UpperCAmelCase : Any = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions a : int = '''ABAABA''' a : List[str] = '''AB''' a : Any = BoyerMooreSearch(text, pattern) a : List[Any] = bms.bad_character_heuristic() if len(positions) == 0: print("No match found") else: print("Pattern found in following positions: ") print(positions)
63
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , ) lowerCAmelCase = parser.parse_args() return args def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): if not len(_UpperCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) lowerCAmelCase ,lowerCAmelCase = imgs[0].size lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) ) lowerCAmelCase ,lowerCAmelCase = grid.size for i, img in enumerate(_UpperCAmelCase ): grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ): lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase ) lowerCAmelCase = pipeline( _UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCamelCase : Optional[Any] = parse_args() # Load models and create wrapper for stable diffusion __UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id)) __UpperCamelCase : Optional[Any] = pipeline.to(unet.device) __UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
4
0
import os import sys import unittest lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase_ = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") lowercase_ = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = get_test_to_tester_mapping(_snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = get_test_to_tester_mapping(_snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = {'''BertModelTest''': '''BertModelTester'''} __SCREAMING_SNAKE_CASE : List[Any] = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case ) self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = get_model_to_test_mapping(_snake_case ) __SCREAMING_SNAKE_CASE : Tuple = get_model_to_test_mapping(_snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } __SCREAMING_SNAKE_CASE : Tuple = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case ) self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = get_model_to_tester_mapping(_snake_case ) __SCREAMING_SNAKE_CASE : int = get_model_to_tester_mapping(_snake_case ) __SCREAMING_SNAKE_CASE : Tuple = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } __SCREAMING_SNAKE_CASE : List[Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case ) self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
74
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ): lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) __UpperCamelCase : Optional[Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __UpperCamelCase : int = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ): try: lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(_UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ): lowerCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(_UpperCAmelCase , _UpperCAmelCase ): AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval() else: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}' lowerCAmelCase = teacher.config.to_diff_dict() try: lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: lowerCAmelCase = teacher_e if d is None: lowerCAmelCase = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_UpperCAmelCase ) # Copy weights lowerCAmelCase = teacher.config_class(**_UpperCAmelCase ) lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(_UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) if d_layers_to_copy is None: lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) try: if hasattr( _UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) lowerCAmelCase = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(_UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
4
0
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Optional[Any]: __A : Optional[int] = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F"""{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}""" dest_layers.load_state_dict(layers_to_copy.state_dict() ) UpperCAmelCase : Optional[Any] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } UpperCAmelCase : int = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _SCREAMING_SNAKE_CASE ( a , a ) -> Any: try: __A : str = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" F""" {n_student}""" ) return list(range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE ( a , a ) -> List[str]: if n_student > n_teacher: raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" ) elif n_teacher == n_student: return list(range(_UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Union[str, Any]: __A : int = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(_UpperCAmelCase , _UpperCAmelCase ): AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience __A : Dict = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval() else: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F"""teacher must be a model or string got type {type(_UpperCAmelCase )}""" __A : List[str] = teacher.config.to_diff_dict() try: __A , __A : str = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: __A : str = teacher_e if d is None: __A : List[Any] = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): __A , __A : List[str] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: __A , __A : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: __A : Optional[int] = teacher_e if d is None: __A : Tuple = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(_UpperCAmelCase ) # Copy weights __A : Dict = teacher.config_class(**_UpperCAmelCase ) __A : List[str] = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. __A : List[Any] = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save __A , __A : Any = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) ) logger.info( F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" F""" {save_path}""" ) student.save_pretrained(_UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: __A : Union[str, Any] = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) if d_layers_to_copy is None: __A : Dict = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase ) try: if hasattr( _UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase ) logger.info( F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" ) __A : Dict = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(_UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
239
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
0
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a = logging.get_logger(__name__) @add_end_docstrings(a__ ) class __a ( a__ ): def __init__( self : List[str] ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : Any ): '''simple docstring''' super().__init__(*_snake_case ,**_snake_case ) requires_backends(self ,"""vision""" ) self.check_model_type(_snake_case ) def __call__( self : str ,lowerCamelCase : int ,**lowerCamelCase : int ): '''simple docstring''' return super().__call__(_snake_case ,**_snake_case ) def UpperCAmelCase__ ( self : List[Any] ,**lowerCamelCase : List[str] ): '''simple docstring''' return {}, {}, {} def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image(_snake_case ) __SCREAMING_SNAKE_CASE = image.size __SCREAMING_SNAKE_CASE = self.image_processor(images=_snake_case ,return_tensors=self.framework ) return model_inputs def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model(**_snake_case ) return model_outputs def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_outputs.predicted_depth __SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode="""bicubic""" ,align_corners=_snake_case ) __SCREAMING_SNAKE_CASE = prediction.squeeze().cpu().numpy() __SCREAMING_SNAKE_CASE = (output * 255 / np.max(_snake_case )).astype("""uint8""" ) __SCREAMING_SNAKE_CASE = Image.fromarray(_snake_case ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = predicted_depth __SCREAMING_SNAKE_CASE = depth return output_dict
109
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: if resistor <= 0: lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!' raise ValueError(_UpperCAmelCase ) first_sum += 1 / float(_UpperCAmelCase ) index += 1 return 1 / first_sum def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): lowerCAmelCase = 0.00 lowerCAmelCase = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCAmelCase = F'Resistor at index {index} has a negative value!' raise ValueError(_UpperCAmelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
4
0
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def __lowercase( ): """simple docstring""" lowerCamelCase , lowerCamelCase = 9, 14 # noqa: F841 lowerCamelCase = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] lowerCamelCase = defaultdict(_UpperCAmelCase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) lowerCamelCase = mst(_UpperCAmelCase ) lowerCamelCase = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: lowerCamelCase = tuple(answer[:2] ) lowerCamelCase = tuple(edge[::-1] ) assert edge in result or reverse in result
623
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class a ( a__ ): snake_case__ = '''glpn''' def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = num_channels lowerCAmelCase = num_encoder_blocks lowerCAmelCase = depths lowerCAmelCase = sr_ratios lowerCAmelCase = hidden_sizes lowerCAmelCase = patch_sizes lowerCAmelCase = strides lowerCAmelCase = mlp_ratios lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = drop_path_rate lowerCAmelCase = layer_norm_eps lowerCAmelCase = decoder_hidden_size lowerCAmelCase = max_depth lowerCAmelCase = head_in_index
4
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase :List[Any] = logging.get_logger(__name__) class _lowerCAmelCase ( a__ ): __SCREAMING_SNAKE_CASE : int = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ): super().__init__(**_snake_case ) A_ : List[Any] = size if size is not None else {"""shortest_edge""": 384} A_ : Optional[Any] = get_size_dict(_snake_case , default_to_square=_snake_case ) A_ : Any = do_resize A_ : Any = size # Default value set here for backwards compatibility where the value in config is None A_ : Union[str, Any] = crop_pct if crop_pct is not None else 224 / 256 A_ : Tuple = resample A_ : Dict = do_rescale A_ : Dict = rescale_factor A_ : int = do_normalize A_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a (self , lowercase , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : int = get_size_dict(_snake_case , default_to_square=_snake_case ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A_ : Tuple = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A_ : Any = int(shortest_edge / crop_pct ) A_ : Optional[Any] = get_resize_output_image_size(_snake_case , size=_snake_case , default_to_square=_snake_case ) A_ : str = resize(image=_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_snake_case , size=(shortest_edge, shortest_edge) , data_format=_snake_case , **_snake_case ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _snake_case , size=(shortest_edge, shortest_edge) , resample=_snake_case , data_format=_snake_case , **_snake_case ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : List[Any] = do_resize if do_resize is not None else self.do_resize A_ : int = crop_pct if crop_pct is not None else self.crop_pct A_ : Tuple = resample if resample is not None else self.resample A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : Any = image_mean if image_mean is not None else self.image_mean A_ : Tuple = image_std if image_std is not None else self.image_std A_ : Any = size if size is not None else self.size A_ : int = get_size_dict(_snake_case , default_to_square=_snake_case ) A_ : List[str] = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A_ : str = [to_numpy_array(_snake_case ) for image in images] if do_resize: A_ : Tuple = [self.resize(image=_snake_case , size=_snake_case , crop_pct=_snake_case , resample=_snake_case ) for image in images] if do_rescale: A_ : Optional[int] = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_normalize: A_ : List[str] = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images] A_ : str = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] A_ : Any = {"""pixel_values""": images} return BatchFeature(data=_snake_case , tensor_type=_snake_case )
667
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = range_bbox def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase = bbox[i, j, 3] lowerCAmelCase = bbox[i, j, 1] lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase = bbox[i, j, 2] lowerCAmelCase = bbox[i, j, 0] lowerCAmelCase = t lowerCAmelCase = tf.convert_to_tensor(_snake_case ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = True snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def _SCREAMING_SNAKE_CASE (): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCAmelCase = outputs.loss lowerCAmelCase = (2,) self.assertEqual(loss.shape , _snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = (2, 2) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _snake_case ) self.assertEqual(outputs.end_logits.shape , _snake_case )
4
0
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset __snake_case : Optional[Any] = '''bert-base-cased''' __snake_case : str = '''google/pegasus-xsum''' __snake_case : int = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] __snake_case : Dict = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] __snake_case : Any = '''patrickvonplaten/t5-tiny-random''' __snake_case : List[Any] = '''sshleifer/bart-tiny-random''' __snake_case : Dict = '''sshleifer/tiny-mbart''' __snake_case : List[Any] = '''sshleifer/tiny-marian-en-de''' def _lowercase ( __snake_case ,__snake_case ) -> Tuple: __lowerCAmelCase : Tuple = "\n".join(_UpperCAmelCase ) Path(_UpperCAmelCase ).open("w" ).writelines(_UpperCAmelCase ) def _lowercase ( __snake_case ) -> Dict: for split in ["train", "val", "test"]: _dump_articles(os.path.join(_UpperCAmelCase ,F"""{split}.source""" ) ,_UpperCAmelCase ) _dump_articles(os.path.join(_UpperCAmelCase ,F"""{split}.target""" ) ,_UpperCAmelCase ) return tmp_dir class A__ ( a__ ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Any) -> str: """simple docstring""" __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(_snake_case) __lowerCAmelCase : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) __lowerCAmelCase : Tuple = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES) __lowerCAmelCase : Optional[Any] = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES) __lowerCAmelCase : Any = 4 __lowerCAmelCase : Tuple = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated __lowerCAmelCase , __lowerCAmelCase : Dict = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. __lowerCAmelCase : str = SeqaSeqDataset( _snake_case , data_dir=_snake_case , type_path="train" , max_source_length=_snake_case , max_target_length=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , ) __lowerCAmelCase : Dict = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(_snake_case , _snake_case) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place __lowerCAmelCase : Tuple = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_snake_case) __lowerCAmelCase : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) __lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES) __lowerCAmelCase : Union[str, Any] = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES) __lowerCAmelCase : Union[str, Any] = 4 __lowerCAmelCase : int = LegacySeqaSeqDataset( _snake_case , data_dir=_snake_case , type_path="train" , max_source_length=20 , max_target_length=_snake_case , ) __lowerCAmelCase : Dict = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def _SCREAMING_SNAKE_CASE ( self: Any) -> str: """simple docstring""" __lowerCAmelCase : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") __lowerCAmelCase : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) __lowerCAmelCase : Tuple = tmp_dir.joinpath("train.source").open().readlines() __lowerCAmelCase : Optional[int] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(_snake_case , _snake_case , 128 , _snake_case) __lowerCAmelCase : Any = {x.name for x in tmp_dir.iterdir()} __lowerCAmelCase : Tuple = {x.name for x in save_dir.iterdir()} __lowerCAmelCase : Tuple = save_dir.joinpath("train.source").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_snake_case) < len(_snake_case) assert len(_snake_case) == 1 assert len(packed_examples[0]) == sum(len(_snake_case) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq") def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]: """simple docstring""" if not FAIRSEQ_AVAILABLE: return __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = self._get_dataset(max_len=64) __lowerCAmelCase : Union[str, Any] = 64 __lowerCAmelCase : str = ds.make_dynamic_sampler(_snake_case , required_batch_size_multiple=_snake_case) __lowerCAmelCase : Tuple = [len(_snake_case) for x in batch_sampler] assert len(set(_snake_case)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_snake_case) == len(_snake_case) # no dropped or added examples __lowerCAmelCase : Any = DataLoader(_snake_case , batch_sampler=_snake_case , collate_fn=ds.collate_fn , num_workers=2) __lowerCAmelCase : Optional[Any] = [] __lowerCAmelCase : Dict = [] for batch in data_loader: __lowerCAmelCase : str = batch["input_ids"].shape __lowerCAmelCase : List[str] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple __lowerCAmelCase : Tuple = np.product(batch["input_ids"].shape) num_src_per_batch.append(_snake_case) if num_src_tokens > (max_tokens * 1.1): failures.append(_snake_case) assert num_src_per_batch[0] == max(_snake_case) if failures: raise AssertionError(F"""too many tokens in {len(_snake_case)} batches""") def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self._get_dataset(max_len=512) __lowerCAmelCase : List[str] = 2 __lowerCAmelCase : str = ds.make_sortish_sampler(_snake_case , shuffle=_snake_case) __lowerCAmelCase : Optional[Any] = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2) __lowerCAmelCase : Tuple = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=_snake_case) __lowerCAmelCase : Tuple = tokenizer.pad_token_id def count_pad_tokens(_SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any]="input_ids"): return [batch[k].eq(_snake_case).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_snake_case , k="labels")) < sum(count_pad_tokens(_snake_case , k="labels")) assert sum(count_pad_tokens(_snake_case)) < sum(count_pad_tokens(_snake_case)) assert len(_snake_case) == len(_snake_case) def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: int=1000 , _SCREAMING_SNAKE_CASE: Any=128) -> str: """simple docstring""" if os.getenv("USE_REAL_DATA" , _snake_case): __lowerCAmelCase : List[Any] = "examples/seq2seq/wmt_en_ro" __lowerCAmelCase : Optional[int] = max_len * 2 * 64 if not Path(_snake_case).joinpath("train.len").exists(): save_len_file(_snake_case , _snake_case) else: __lowerCAmelCase : Union[str, Any] = "examples/seq2seq/test_data/wmt_en_ro" __lowerCAmelCase : List[str] = max_len * 4 save_len_file(_snake_case , _snake_case) __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case) __lowerCAmelCase : str = SeqaSeqDataset( _snake_case , data_dir=_snake_case , type_path="train" , max_source_length=_snake_case , max_target_length=_snake_case , n_obs=_snake_case , ) return ds, max_tokens, tokenizer def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self._get_dataset() __lowerCAmelCase : List[Any] = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=_snake_case)) __lowerCAmelCase : Optional[int] = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=_snake_case)) assert idsa.intersection(_snake_case) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[str]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case , use_fast=_snake_case) if tok_name == MBART_TINY: __lowerCAmelCase : Dict = SeqaSeqDataset( _snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) __lowerCAmelCase : List[str] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: __lowerCAmelCase : Dict = SeqaSeqDataset( _snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path="train" , max_source_length=4 , max_target_length=8 , ) __lowerCAmelCase : Optional[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_snake_case) == 1 if tok_name == BART_TINY else len(_snake_case) == 0
293
"""simple docstring""" import argparse import os import re import packaging.version __UpperCamelCase : Union[str, Any] = '''examples/''' __UpperCamelCase : str = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __UpperCamelCase : List[str] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __UpperCamelCase : Optional[int] = '''README.md''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.read() lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern] lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase ) lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '🤗 Transformers currently provides the following architectures' lowerCAmelCase = '1. Want to contribute a new model?' with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.readlines() # Find the start of the list. lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowerCAmelCase = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): with open(REPLACE_FILES['init'] , 'r' ) as f: lowerCAmelCase = f.read() lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ): lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: lowerCAmelCase = default_version.base_version elif patch: lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = default_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = get_version() lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0' lowerCAmelCase = current_version.base_version # Check with the user we got that right. lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' ) if len(_UpperCAmelCase ) == 0: lowerCAmelCase = dev_version print(F'Updating version to {version}.' ) global_version_update(_UpperCAmelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __UpperCamelCase : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
4
0